hw/scsi/scsi-generic.c
/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;

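/*
 * Migration support for in-flight requests: only the buffer length and,
 * for host-bound (SCSI_XFER_TO_DEV) transfers, the buffer contents are
 * put on the wire here; the common SCSIRequest state is handled by the
 * generic SCSI vmstate code.
 */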
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion. */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

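/*
 * Fill in the sg_io_hdr for this request and submit it asynchronously
 * with blk_aio_ioctl(SG_IO).  Returns 0 on successful submission and
 * -EIO if no AIO request could be issued; the actual SCSI status is
 * delivered later through the completion callback.
 */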
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

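/*
 * Maximum transfer size the host can handle, expressed in units of the
 * device block size: the backend's hardware transfer limit, further
 * capped by the maximum I/O vector count times the host page size.
 */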
static uint64_t calculate_max_transfer(SCSIDevice *s)
{
    uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
    uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

    assert(max_transfer);
    max_transfer = MIN_NON_ZERO(max_transfer,
                                max_iov * qemu_real_host_page_size());

    return max_transfer / s->blocksize;
}

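/*
 * Post-process an INQUIRY reply: record the device's SCSI version from
 * the standard data, clamp the Block Limits VPD page to the host limits,
 * and advertise page 0xb0 in the Supported VPD Pages list when Block
 * Limits emulation is needed.  Returns the (possibly grown) reply length.
 */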
static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, the first 3 bits of byte 2 are the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0 && r->buflen >= 8) {
            uint8_t buf[16] = {};
            uint8_t buf_used = MIN(r->buflen, 16);
            uint64_t max_transfer = calculate_max_transfer(s);

            memcpy(buf, r->buf, buf_used);
            stl_be_p(&buf[8], max_transfer);
            stl_be_p(&buf[12], MIN_NON_ZERO(max_transfer, ldl_be_p(&buf[12])));
            memcpy(r->buf + 8, buf + 8, buf_used - 8);

        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert.  When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }

    return len;
}

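/*
 * Build an emulated Block Limits (0xb0) VPD reply directly into the
 * request buffer, and clear any sense/status left over from the failed
 * hardware INQUIRY so it is not reported to the guest.
 */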
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = calculate_max_transfer(s),
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

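/*
 * Completion callback for device-to-host transfers.  Besides forwarding
 * the data, this path snoops READ CAPACITY replies to learn the block
 * size, patches MODE SENSE data for read-only backends, post-processes
 * INQUIRY replies and, if required, substitutes an emulated Block Limits
 * VPD page when the device rejects the request.
 */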
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
            goto req_complete;
        }

        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  The transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */
static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

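/*
 * Extract a 64-bit NAA world wide name from a designation descriptor of
 * the Device Identification VPD page: either a binary NAA designator
 * (type 3, 8 bytes) or a "naa." SCSI name string (type 8).  Returns 0 on
 * success, -EINVAL if the descriptor is not usable.
 */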
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

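/*
 * Synchronous SG_IO helper for device-to-host commands, used while the
 * device is being set up.  Returns 0 on success and -1 on any transport,
 * driver or SCSI status error.
 */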
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

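/*
 * Issue INQUIRY for the Device Identification VPD page (0x83) and record
 * the NAA world wide names associated with the logical unit and with the
 * target port, if present.
 */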
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

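/*
 * Read the block descriptor returned by MODE SENSE to obtain the current
 * block size of a stream (tape) device.  Returns -1 if the command fails.
 */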
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /*
         * Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY.  If they don't, they likely would assume these
         * sizes anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

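/*
 * User-visible properties of scsi-generic.  A typical command line
 * (sketch only; the exact controller and option spelling depend on the
 * machine configuration in use) passes a host /dev/sg* node through, e.g.:
 *
 *   -device virtio-scsi-pci \
 *   -blockdev node-name=sg0,driver=host_device,filename=/dev/sg0 \
 *   -device scsi-generic,drive=sg0,io_timeout=90
 */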
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, size_t buf_len,
                                  void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize   = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */