ppc/pnv: Remove user-created PHB{3,4,5} devices
[qemu.git] / hw / scsi / scsi-generic.c
blob0306ccc7b1e4827a67aaed926f9333ff4658ad86
1 /*
2 * Generic SCSI Device support
4 * Copyright (c) 2007 Bull S.A.S.
5 * Based on code by Paul Brook
6 * Based on code by Fabrice Bellard
8 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
10 * This code is licensed under the LGPL.
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qemu/ctype.h"
17 #include "qemu/error-report.h"
18 #include "qemu/module.h"
19 #include "hw/scsi/scsi.h"
20 #include "migration/qemu-file-types.h"
21 #include "hw/qdev-properties.h"
22 #include "hw/qdev-properties-system.h"
23 #include "hw/scsi/emulation.h"
24 #include "sysemu/block-backend.h"
25 #include "trace.h"
27 #ifdef __linux__
29 #include <scsi/sg.h>
30 #include "scsi/constants.h"
32 #ifndef MAX_UINT
33 #define MAX_UINT ((unsigned int)-1)
34 #endif
/*
 * Per-request state for the pass-through device: the generic SCSIRequest
 * plus a bounce buffer and the sg_io_hdr handed to the kernel's SG_IO ioctl.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* bounce buffer for the data transfer */
    int buflen;             /* allocated size of buf, in bytes */
    int len;                /* remaining transfer length; -1 once the read completed */
    sg_io_hdr_t io_header;  /* SG_IO request/response header (see <scsi/sg.h>) */
} SCSIGenericReq;
/*
 * Migration: serialize the request's bounce buffer length and, for
 * host-bound transfers, the data itself.  Must mirror
 * scsi_generic_load_request exactly (same fields, same order).
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Pass-through requests never use a scatter/gather list here. */
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
/*
 * Migration: counterpart of scsi_generic_save_request; restores buflen
 * and, for host-bound transfers, the buffered data.
 */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}
/* Release the request's bounce buffer (SCSIReqOps.free_req callback). */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}
/*
 * Helper function for command completion.
 *
 * Maps the SG_IO outcome to a SCSI status for the guest, in priority order:
 *   1. cancellation           -> just acknowledge the cancel;
 *   2. negative errno (ret<0) -> synthesize sense via scsi_sense_from_errno;
 *   3. host (transport) error -> fail the request with the host status;
 *   4. driver timeout         -> report BUSY so the guest retries;
 *   5. otherwise              -> forward the device's own status/sense.
 * Always drops the reference the submitter took for the AIO callback.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            /* Sense data was written by the kernel; record its length. */
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}
/*
 * AIO completion callback for commands with no data phase: take the
 * backend's AioContext lock, then complete the request synchronously.
 */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/*
 * Fill in the sg_io_hdr for this request and submit it to the backend
 * via the asynchronous SG_IO ioctl.
 *
 * @direction: SG_DXFER_NONE / SG_DXFER_FROM_DEV / SG_DXFER_TO_DEV.
 * @complete:  AIO callback invoked when the ioctl finishes; receives @r.
 * Returns 0 on successful submission, -EIO if the AIOCB could not be
 * created.  The caller is expected to hold a reference on r->req for
 * the duration of the AIO operation.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    /* Device property is in seconds; the kernel expects milliseconds. */
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}
/*
 * Post-process an INQUIRY reply before handing it to the guest:
 *  - learn the device's SCSI version from standard INQUIRY data;
 *  - clamp the Block Limits VPD page (0xb0) transfer lengths to what the
 *    host block layer can actually do;
 *  - when we emulate VPD Block Limits, advertise page 0xb0 in the
 *    Supported VPD Pages list (page 0x00).
 * Returns the (possibly grown) reply length.
 */
static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* Block Limits VPD page: cap max/optimal transfer length. */
            uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
            uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

            assert(max_transfer);
            max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size)
                / s->blocksize;
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            /* Shift larger page numbers one slot right, staying in bounds. */
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* One more supported page in the header's page-length field. */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }
    return len;
}
/*
 * Build an emulated Block Limits VPD page (0xb0) response in r->buf,
 * for devices whose hardware rejected the request.  Also clears the
 * sense/driver/status fields of the io_header so the earlier hardware
 * error is not reported alongside the valid emulated reply.
 * Returns the reply length to hand to the guest (r->buflen).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);        /* page code: Block Limits */
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);      /* page length (excludes 4-byte header) */

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}
/*
 * AIO completion callback for device-to-guest transfers.  Besides pushing
 * the data to the guest, this snoops replies to keep device state in sync:
 * READ CAPACITY (10/16) updates blocksize/max_lba, MODE SENSE output is
 * patched for read-only backends, INQUIRY replies are post-processed, and
 * a failed VPD Block Limits INQUIRY may be replaced by an emulated reply.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Actual bytes transferred = requested length minus residual. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect bit in the device-specific parameter byte. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* The transfer already completed in scsi_read_complete. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}
/*
 * AIO completion callback for guest-to-device transfers.  For tape
 * devices, snoops MODE SELECT (6) with a 12-byte parameter list to pick
 * up a changed block size from the mode parameter block descriptor.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        /* Block length from the block descriptor (bytes 9..11, big-endian). */
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the guest to fill the bounce buffer. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}
428 /* Return a pointer to the data buffer. */
429 static uint8_t *scsi_get_buf(SCSIRequest *req)
431 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
433 return r->buf;
436 static void scsi_generic_command_dump(uint8_t *cmd, int len)
438 int i;
439 char *line_buffer, *p;
441 line_buffer = g_malloc(len * 5 + 1);
443 for (i = 0, p = line_buffer; i < len; i++) {
444 p += sprintf(p, " 0x%02x", cmd[i]);
446 trace_scsi_generic_send_command(line_buffer);
448 g_free(line_buffer);
/* Execute a scsi command. Returns the length of the data expected by the
   command. This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */
static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: free any stale buffer and submit immediately. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)size the bounce buffer only when the transfer length changed. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* Writes start with len == 0 so scsi_write_data requests data first. */
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}
/*
 * Extract a 64-bit NAA world-wide name from a VPD page 0x83 designation
 * descriptor @p.  Two descriptor flavours are handled:
 *   - designator type 3 (NAA, binary): must be exactly 8 bytes long;
 *   - designator type 8 (SCSI name string): must look like "naa.<16 hex
 *     digits>" (optionally followed by ",<...>" when longer than 20 bytes).
 * On success stores the WWN in *p_wwn and returns 0; otherwise -EINVAL.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse the 16 hex digits following "naa." into a 64-bit value.
         * NOTE(review): non-hex characters are not rejected here and would
         * silently produce a garbage WWN -- appears to rely on well-formed
         * device responses. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}
/*
 * Issue a synchronous device-to-host SG_IO command on @blk.
 *
 * @cmd/@cmd_size:  the CDB to send.
 * @buf/@buf_size:  destination buffer for the data-in phase.
 * @timeout:        per-command timeout in seconds (converted to ms).
 * Returns 0 on full success; -1 if the ioctl failed or any of the
 * device/driver/host status fields reported an error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}
/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;              /* EVPD bit set */
    cmd[2] = 0x00;           /* page code: Supported VPD Pages */
    cmd[4] = sizeof(buf);    /* allocation length */

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    /* Scan the supported-page list for page 0xb0. */
    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}
/*
 * Fetch the Device Identification VPD page (0x83) and walk its
 * designation descriptors, recording the first NAA id associated with
 * the logical unit in s->wwn and the first one associated with the
 * target port in s->port_wwn.  Errors are silently ignored: the wwn
 * fields simply stay unset.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;              /* EVPD bit set */
    cmd[2] = 0x83;           /* page code: Device Identification */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    /* Page length, clamped so descriptors stay inside buf. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        /* Stop if the descriptor would run past the page. */
        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}
646 void scsi_generic_read_device_inquiry(SCSIDevice *s)
648 scsi_generic_read_device_identification(s);
649 if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
650 scsi_generic_set_vpd_bl_emulation(s);
651 } else {
652 s->needs_vpd_bl_emulation = false;
/*
 * Query a stream (tape) device's block size with MODE SENSE (6) and
 * read it from the block descriptor (bytes 9..11, big-endian).
 * Returns the block size, or -1 if the command failed.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    /* 6-second timeout, matching the short probe nature of this call. */
    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}
/*
 * Device reset: restore the configured default SCSI version (so it is
 * re-learned from the next standard INQUIRY) and fail all in-flight
 * requests with RESET sense.
 */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}
/*
 * Realize the scsi-generic device: validate the backing drive and its
 * error-handling options, verify the host node speaks SG_IO v3+, read
 * the SCSI type via SG_GET_SCSI_ID, pick an initial block size, and
 * run the initial INQUIRY-based probing.  Errors are reported via @errp.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Pass-through cannot retry I/O, so only ENOSPC/report policies work. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}
/* Request operations table for scsi-generic (also used by scsi-block). */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};
773 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
774 uint8_t *buf, void *hba_private)
776 return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
/* qdev properties: backing drive, share-rw, and per-command I/O timeout. */
static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
/* Delegate CDB parsing to the bus-level parser (parse_cdb class hook). */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}
/* Class init: wire up realize/alloc_req/parse_cdb hooks and qdev metadata. */
static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize     = scsi_generic_realize;
    sc->alloc_req   = scsi_new_request;
    sc->parse_cdb   = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}
/* QOM type registration record for the "scsi-generic" device. */
static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};
/* Register the scsi-generic type with the QOM type system at startup. */
static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)
822 #endif /* __linux__ */