scsi-generic: do not snoop the output of failed commands
hw/scsi/scsi-generic.c
/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}
/* Helper function for command completion. */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, the first 3 bits of byte 2 are the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}
static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Write data to a scsi device.  Failures are reported through the request's
   completion callback; the transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}
/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data. */
static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            /* Parse the 16 hex digits that follow the "naa." prefix. */
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}
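
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * scsi_SG_IO_FROM_DEV() that fetches the standard INQUIRY data.  The 36-byte
 * allocation length and the use of DEFAULT_IO_TIMEOUT are arbitrary choices
 * made for this example.
 *
 *   uint8_t cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *   uint8_t buf[36] = { 0 };
 *
 *   if (scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf),
 *                           DEFAULT_IO_TIMEOUT) == 0) {
 *       ... buf[0] & 0x1f now holds the peripheral device type ...
 *   }
 */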
/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* Check that we are using a driver managing SG_IO (version 3 and after). */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* Get the LUN of the /dev/sg? device. */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* Define device state. */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}
static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize   = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd = &vmstate_scsi_device;
}
static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */
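
/*
 * Usage note (illustrative, not part of the original file): scsi-generic
 * passes SG_IO commands through to a host /dev/sgN node, so a typical
 * command line might look like the sketch below.  The controller choice,
 * id names, and /dev/sg2 path are placeholders.
 *
 *   qemu-system-x86_64 ... \
 *       -device virtio-scsi-pci,id=scsi0 \
 *       -drive if=none,id=sgdev,file=/dev/sg2 \
 *       -device scsi-generic,drive=sgdev
 */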