scsi: inline sg_io_sense_from_errno() into the callers.
[qemu/ar7.git] / hw / scsi / scsi-generic.c
blob02b87819e56daa366ae84326722747d037cc84f7
1 /*
2 * Generic SCSI Device support
4 * Copyright (c) 2007 Bull S.A.S.
5 * Based on code by Paul Brook
6 * Based on code by Fabrice Bellard
8 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
10 * This code is licensed under the LGPL.
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu/ctype.h"
17 #include "qemu/error-report.h"
18 #include "qemu/module.h"
19 #include "hw/scsi/scsi.h"
20 #include "migration/qemu-file-types.h"
21 #include "hw/qdev-properties.h"
22 #include "hw/qdev-properties-system.h"
23 #include "hw/scsi/emulation.h"
24 #include "sysemu/block-backend.h"
25 #include "trace.h"
27 #ifdef __linux__
29 #include <scsi/sg.h>
30 #include "scsi/constants.h"
32 #ifndef MAX_UINT
33 #define MAX_UINT ((unsigned int)-1)
34 #endif
/* Per-request state for SG_IO passthrough requests. */
typedef struct SCSIGenericReq {
    SCSIRequest req;       /* common SCSI request state; must come first */
    uint8_t *buf;          /* data buffer for the transfer (g_malloc'd) */
    int buflen;            /* allocated size of buf, in bytes */
    int len;               /* remaining transfer length; -1 once data is in */
    sg_io_hdr_t io_header; /* Linux sg v3 header handed to the SG_IO ioctl */
} SCSIGenericReq;
44 static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
46 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
48 qemu_put_sbe32s(f, &r->buflen);
49 if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
50 assert(!r->req.sg);
51 qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
55 static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
57 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
59 qemu_get_sbe32s(f, &r->buflen);
60 if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
61 assert(!r->req.sg);
62 qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
66 static void scsi_free_request(SCSIRequest *req)
68 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
70 g_free(r->buf);
/*
 * Helper function for command completion.
 *
 * Derives the final SCSI status for @r from, in priority order:
 * a negative host errno (@ret), the SG_IO host_status, a driver
 * timeout, and finally the device's own SCSI status byte.  Completes
 * the request and drops the reference taken at submission time.
 * Must be called with r->req.aiocb already cleared.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        /* Cancellation wins over any status: report it and bail out. */
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        /* Local submission/IO error: synthesize sense from the errno. */
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        /* Transport-level failure reported by the SG driver. */
        status = scsi_sense_from_host_status(io_hdr->host_status, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        /* Command timed out in the driver: surface it as BUSY. */
        status = BUSY;
    } else {
        /* Pass the device's status through, with any autosense data. */
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}
111 static void scsi_command_complete(void *opaque, int ret)
113 SCSIGenericReq *r = (SCSIGenericReq *)opaque;
114 SCSIDevice *s = r->req.dev;
116 assert(r->req.aiocb != NULL);
117 r->req.aiocb = NULL;
119 aio_context_acquire(blk_get_aio_context(s->conf.blk));
120 scsi_command_complete_noio(r, ret);
121 aio_context_release(blk_get_aio_context(s->conf.blk));
/*
 * Fill in the sg v3 header for request @r and submit the SG_IO ioctl
 * asynchronously on @blk.  @direction is one of the SG_DXFER_*
 * constants; @complete runs when the ioctl finishes.
 *
 * Returns 0 if the AIO request was queued, -EIO if it could not be
 * created.  All other errors are delivered through @complete.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';  /* required magic for the sg v3 API */
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;  /* kernel writes autosense here */
    r->io_header.timeout = s->io_timeout * 1000;  /* SG_IO wants milliseconds */
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}
/*
 * Post-process an INQUIRY response in r->buf before handing it to the
 * guest: snoop the SCSI version from standard INQUIRY data, patch the
 * Block Limits VPD page, and advertise page 0xb0 in the Supported VPD
 * Pages list when we emulate it.
 */
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* Clamp the device's reported limits to the host backend's. */
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            /* Shift pages >= 0xb0 one slot up, dropping any that would
             * fall past the end of the buffer. */
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* Bump the page-list length even if 0xb0 itself was truncated. */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}
/*
 * Build an emulated VPD Block Limits (0xb0) INQUIRY response into
 * r->buf for devices that do not provide one themselves.  Returns the
 * number of bytes available in r->buf (the full buffer length; unused
 * tail bytes are zeroed).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    /* Derive the I/O limit from the host block backend. */
    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);        /* peripheral device type */
    stb_p(buf + 1, 0xb0);       /* page code: Block Limits */
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);     /* page length */

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}
/*
 * AIO completion callback for device-to-guest transfers.  Handles
 * sense-triggered Block Limits emulation, snoops READ CAPACITY replies
 * to track blocksize/max_lba, patches MODE SENSE for read-only
 * backends, and finally delivers the data to the HBA.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Actual bytes transferred = requested length minus kernel residual. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;  /* mark data phase finished for scsi_read_data */

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;   /* set WP bit in device-specific parameter */
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
348 /* Read more data from scsi device into buffer. */
349 static void scsi_read_data(SCSIRequest *req)
351 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
352 SCSIDevice *s = r->req.dev;
353 int ret;
355 trace_scsi_generic_read_data(req->tag);
357 /* The request is used as the AIO opaque value, so add a ref. */
358 scsi_req_ref(&r->req);
359 if (r->len == -1) {
360 scsi_command_complete_noio(r, 0);
361 return;
364 ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
365 scsi_read_complete);
366 if (ret < 0) {
367 scsi_command_complete_noio(r, ret);
/*
 * AIO completion callback for guest-to-device transfers.  Snoops a
 * successful MODE SELECT on tape devices to keep the cached blocksize
 * in sync, then runs the common completion path.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* MODE SELECT with a 12-byte parameter list on tape: bytes 9..11 of
     * the block descriptor carry the new block length. */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
400 /* Write data to a scsi device. Returns nonzero on failure.
401 The transfer may complete asynchronously. */
402 static void scsi_write_data(SCSIRequest *req)
404 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
405 SCSIDevice *s = r->req.dev;
406 int ret;
408 trace_scsi_generic_write_data(req->tag);
409 if (r->len == 0) {
410 r->len = r->buflen;
411 scsi_req_data(&r->req, r->len);
412 return;
415 /* The request is used as the AIO opaque value, so add a ref. */
416 scsi_req_ref(&r->req);
417 ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
418 if (ret < 0) {
419 scsi_command_complete_noio(r, ret);
423 /* Return a pointer to the data buffer. */
424 static uint8_t *scsi_get_buf(SCSIRequest *req)
426 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
428 return r->buf;
/* Format a CDB as " 0xNN 0xNN ..." and emit it through the trace point. */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    char *buffer, *cursor;
    int i;

    /* Each byte renders as exactly 5 chars (" 0xNN"); +1 for the NUL. */
    buffer = g_malloc(len * 5 + 1);
    cursor = buffer;
    for (i = 0; i < len; i++) {
        cursor += sprintf(cursor, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(buffer);
    g_free(buffer);
}
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */
static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately and complete via AIO. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer only when the size actually changes. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Write: HBA supplies data first; negative length signals that. */
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
/*
 * Extract a 64-bit WWN from one INQUIRY VPD 0x83 designation descriptor.
 *
 * Two designator types (low nibble of byte 1) are understood:
 *  - 3: binary NAA identifier, which must be exactly 8 bytes long;
 *  - 8: SCSI name string of the form "naa.<16 hex digits>", optionally
 *    followed by a ',' separator when longer than 20 bytes.
 *
 * On success stores the WWN in *p_wwn and returns 0; otherwise -EINVAL.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    switch (p[1] & 0xF) {
    case 3:
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;

    case 8:
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            /* Fold the next hex digit into the accumulated WWN. */
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;

    default:
        return -EINVAL;
    }
}
/*
 * Issue a synchronous device-to-host SG_IO command on @blk.
 *
 * @cmd/@cmd_size: CDB to send; @buf/@buf_size: destination buffer;
 * @timeout: seconds (converted to the milliseconds SG_IO expects).
 *
 * Returns 0 only when the ioctl succeeded AND the device, driver and
 * host status are all clean; -1 on any failure.  Sense data is
 * discarded (only an 8-byte scratch sense buffer is provided).
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}
555 * Executes an INQUIRY request with EVPD set to retrieve the
556 * available VPD pages of the device. If the device does
557 * not support the Block Limits page (page 0xb0), set
558 * the needs_vpd_bl_emulation flag for future use.
560 static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
562 uint8_t cmd[6];
563 uint8_t buf[250];
564 uint8_t page_len;
565 int ret, i;
567 memset(cmd, 0, sizeof(cmd));
568 memset(buf, 0, sizeof(buf));
569 cmd[0] = INQUIRY;
570 cmd[1] = 1;
571 cmd[2] = 0x00;
572 cmd[4] = sizeof(buf);
574 ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
575 buf, sizeof(buf), s->io_timeout);
576 if (ret < 0) {
578 * Do not assume anything if we can't retrieve the
579 * INQUIRY response to assert the VPD Block Limits
580 * support.
582 s->needs_vpd_bl_emulation = false;
583 return;
586 page_len = buf[3];
587 for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
588 if (buf[i] == 0xb0) {
589 s->needs_vpd_bl_emulation = false;
590 return;
593 s->needs_vpd_bl_emulation = true;
/*
 * Fetch the Device Identification VPD page (0x83) and walk its
 * designation descriptors, recording the first NAA-derived WWN found
 * for the logical unit (s->wwn) and for the target port (s->port_wwn).
 * Failures are silent: the WWN fields are simply left unset.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;            /* EVPD bit */
    cmd[2] = 0x83;         /* page 0x83: Device Identification */
    cmd[4] = sizeof(buf);  /* allocation length */

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    /* Page length is big-endian at bytes 2..3; clamp to our buffer. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        /* Stop if this descriptor (header + p[3] bytes) overruns the page. */
        if (i + (p[3] + 4) > len) {
            break;
        }

        /* Bits 5:4 of byte 1 are the association field. */
        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}
641 void scsi_generic_read_device_inquiry(SCSIDevice *s)
643 scsi_generic_read_device_identification(s);
644 if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
645 scsi_generic_set_vpd_bl_emulation(s);
646 } else {
647 s->needs_vpd_bl_emulation = false;
651 static int get_stream_blocksize(BlockBackend *blk)
653 uint8_t cmd[6];
654 uint8_t buf[12];
655 int ret;
657 memset(cmd, 0, sizeof(cmd));
658 memset(buf, 0, sizeof(buf));
659 cmd[0] = MODE_SENSE;
660 cmd[4] = sizeof(buf);
662 ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
663 if (ret < 0) {
664 return -1;
667 return (buf[9] << 16) | (buf[10] << 8) | buf[11];
670 static void scsi_generic_reset(DeviceState *dev)
672 SCSIDevice *s = SCSI_DEVICE(dev);
674 s->scsi_version = s->default_scsi_version;
675 scsi_device_purge_requests(s, SENSE_CODE(RESET));
678 static void scsi_generic_realize(SCSIDevice *s, Error **errp)
680 int rc;
681 int sg_version;
682 struct sg_scsi_id scsiid;
684 if (!s->conf.blk) {
685 error_setg(errp, "drive property not set");
686 return;
689 if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
690 blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
691 error_setg(errp, "Device doesn't support drive option werror");
692 return;
694 if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
695 error_setg(errp, "Device doesn't support drive option rerror");
696 return;
699 /* check we are using a driver managing SG_IO (version 3 and after */
700 rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
701 if (rc < 0) {
702 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
703 if (rc != -EPERM) {
704 error_append_hint(errp, "Is this a SCSI device?\n");
706 return;
708 if (sg_version < 30000) {
709 error_setg(errp, "scsi generic interface too old");
710 return;
713 /* get LUN of the /dev/sg? */
714 if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
715 error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
716 return;
718 if (!blkconf_apply_backend_options(&s->conf,
719 !blk_supports_write_perm(s->conf.blk),
720 true, errp)) {
721 return;
724 /* define device state */
725 s->type = scsiid.scsi_type;
726 trace_scsi_generic_realize_type(s->type);
728 switch (s->type) {
729 case TYPE_TAPE:
730 s->blocksize = get_stream_blocksize(s->conf.blk);
731 if (s->blocksize == -1) {
732 s->blocksize = 0;
734 break;
736 /* Make a guess for block devices, we'll fix it when the guest sends.
737 * READ CAPACITY. If they don't, they likely would assume these sizes
738 * anyway. (TODO: they could also send MODE SENSE).
740 case TYPE_ROM:
741 case TYPE_WORM:
742 s->blocksize = 2048;
743 break;
744 default:
745 s->blocksize = 512;
746 break;
749 trace_scsi_generic_realize_blocksize(s->blocksize);
751 /* Only used by scsi-block, but initialize it nevertheless to be clean. */
752 s->default_scsi_version = -1;
753 s->io_timeout = DEFAULT_IO_TIMEOUT;
754 scsi_generic_read_device_inquiry(s);
/* Request callbacks wired into the generic SCSI request layer. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};
768 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
769 uint8_t *buf, void *hba_private)
771 return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
/* qdev properties; applied before realize. io_timeout is in seconds. */
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
782 static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
783 uint8_t *buf, void *hba_private)
785 return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
788 static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
790 DeviceClass *dc = DEVICE_CLASS(klass);
791 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
793 sc->realize = scsi_generic_realize;
794 sc->alloc_req = scsi_new_request;
795 sc->parse_cdb = scsi_generic_parse_cdb;
796 dc->fw_name = "disk";
797 dc->desc = "pass through generic scsi device (/dev/sg*)";
798 dc->reset = scsi_generic_reset;
799 device_class_set_props(dc, scsi_generic_properties);
800 dc->vmsd = &vmstate_scsi_device;
/* QOM type description for the "scsi-generic" passthrough device. */
static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};
static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

/* Register the "scsi-generic" type with QOM at startup. */
type_init(scsi_generic_register_types)
817 #endif /* __linux__ */