hw/ufs/lu.c

/*
 * QEMU UFS Logical Unit
 *
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/memalign.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "ufs.h"

/*
 * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c,
 * with minor adjustments to make it work for UFS.
 */

#define SCSI_DMA_BUF_SIZE (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_INQUIRY_DATA_SIZE 36
#define SCSI_MAX_MODE_LEN 256

typedef struct UfsSCSIReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
} UfsSCSIReq;

static void ufs_scsi_free_request(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense)
{
    trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc,
                                   sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

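/*
 * Fill in an INQUIRY Vital Product Data (EVPD) page for the given page
 * code. Returns the response length in bytes, or -1 if the page is not
 * supported or the output buffer is too small.
 */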
static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf,
                                     uint32_t outbuf_len)
{
    UfsHc *u = UFS(req->bus->qbus.parent);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
        return -1;
    }

    outbuf[buflen++] = lu->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (u->params.serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x87; /* mode page policy */
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!u->params.serial) {
            trace_ufs_scsi_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(u->params.serial);
        if (l > SCSI_INQUIRY_DATA_SIZE) {
            l = SCSI_INQUIRY_DATA_SIZE;
        }

        trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, u->params.serial, l);
        buflen += l;
        break;
    }
    case 0x87: /* Mode Page Policy, mandatory */
    {
        trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer);
        outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */
        outbuf[buflen++] = 0xff;
        outbuf[buflen++] = 0; /* shared */
        outbuf[buflen++] = 0;
        break;
    }
    default:
        return -1;
    }

    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf,
                                    uint32_t outbuf_len)
{
    int buflen = 0;

    if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) {
        return -1;
    }

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    if (is_wlun(req->lun)) {
        outbuf[0] = TYPE_WLUN;
    } else {
        outbuf[0] = 0;
    }
    outbuf[1] = 0;

    strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' ');
    strpadcpy((char *)&outbuf[8], 8, "QEMU", ' ');

    memset(&outbuf[32], 0, 4);

    outbuf[2] = 0x06; /* SPC-4 */
    outbuf[3] = 0x2;

    if (buflen > SCSI_INQUIRY_DATA_SIZE) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /*
         * If the allocation length of CDB is too small, the additional
         * length is not adjusted
         */
        outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5;
    }

    /* Support TCQ. */
    outbuf[7] = req->bus->info->tcq ? 0x02 : 0;
    return buflen;
}

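/*
 * Fill in a single mode page at *p_outbuf and advance the pointer past it.
 * Returns the number of bytes written (the page length plus the 2-byte page
 * header), or -1 if the page is not supported. A page_control value of 1
 * requests Changeable Values; other values return the current settings.
 */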
static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_CACHING] = 1,
        [MODE_PAGE_R_W_ERROR] = 1,
        [MODE_PAGE_CONTROL] = 1,
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    assert(page < ARRAY_SIZE(mode_sense_valid));
    if ((mode_sense_valid[page]) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     */
    switch (page) {
    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(lu->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        break;

    case MODE_PAGE_CONTROL:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[1] = 0x10; /* Queue Algorithm modifier */
        p[8] = 0xff; /* Busy Timeout Period */
        p[9] = 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param = 0;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    if (!dbd) {
        return -1;
    }

    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_ufs_scsi_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                                                          10,
                                      page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (!blk_is_writable(lu->qdev.conf.blk)) {
        dev_specific_param |= 0x80; /* Readonly. */
    }

    p[2] = 0; /* Medium type. */
    p[3] = dev_specific_param;
    p[6] = p[7] = 0; /* Block descriptor length. */
    p += 8;

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(lu, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(lu, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    outbuf[0] = ((buflen - 2) >> 8) & 0xff;
    outbuf[1] = (buflen - 2) & 0xff;
    return buflen;
}

/*
 * scsi_handle_rw_error has two return values. False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request. Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status. */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings. Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense && scsi_sense_buf_is_guest_recoverable(
                             r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(lu->qdev.conf.blk, is_read, error);
        blk_error_action(lu->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
        }
        if (!req_has_sense && status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ufs_scsi_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

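/*
 * Emulate a SCSI command using buffered data. Returns the number of bytes
 * the guest still has to transfer (negative for host-to-device transfers),
 * or 0 if no further data transfer is needed.
 */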
static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint32_t last_block = 0;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE_10:
    case START_STOP:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(lu->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(lu->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE_10:
        buflen = ufs_scsi_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero. */
        memset(outbuf, 0, 8);
        if (lu->qdev.max_lba > 0) {
            last_block = lu->qdev.max_lba - 1;
        }
        outbuf[0] = (last_block >> 24) & 0xff;
        outbuf[1] = (last_block >> 16) & 0xff;
        outbuf[2] = (last_block >> 8) & 0xff;
        outbuf[3] = last_block & 0xff;
        outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff;
        outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff;
        outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff;
        outbuf[7] = lu->qdev.blocksize & 0xff;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE". */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case VERIFY_10:
        trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_ufs_scsi_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);

            if (lu->qdev.max_lba > 0) {
                last_block = lu->qdev.max_lba - 1;
            }
            outbuf[0] = 0;
            outbuf[1] = 0;
            outbuf[2] = 0;
            outbuf[3] = 0;
            outbuf[4] = (last_block >> 24) & 0xff;
            outbuf[5] = (last_block >> 16) & 0xff;
            outbuf[6] = (last_block >> 8) & 0xff;
            outbuf[7] = last_block & 0xff;
            outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff;
            outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff;
            outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff;
            outbuf[11] = lu->qdev.blocksize & 0xff;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&lu->qdev.conf);

            if (lu->unit_desc.provisioning_type == 2 ||
                lu->unit_desc.provisioning_type == 3) {
                outbuf[14] = 0x80;
            }
            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_ufs_scsi_emulate_command_SAI_unsupported();
        goto illegal_request;
    case MODE_SELECT_10:
        trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case START_STOP:
        /*
         * TODO: START_STOP is not yet implemented. It always returns success.
         * Revisit it when ufs power management is implemented.
         */
        trace_ufs_scsi_emulate_command_START_STOP();
        break;
    case FORMAT_UNIT:
        trace_ufs_scsi_emulate_command_FORMAT_UNIT();
        break;
    case SEND_DIAGNOSTIC:
        trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC();
        break;
    default:
        trace_ufs_scsi_emulate_command_UNKNOWN(buf[0],
                                               scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }

    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;
}

static void ufs_scsi_emulate_read_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_ufs_scsi_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE. */
    scsi_req_complete(&r->req, GOOD);
}

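/*
 * Check a MODE SELECT page payload against what MODE SENSE would report:
 * returns 0 if every bit that is not changeable matches the current
 * values, and -1 otherwise (or if the page is not supported).
 */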
static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf,
                                      int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /*
     * The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(lu, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(lu, page, &p, 1);
    assert(changeable_len == len);

    /*
     * Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

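/*
 * Iterate over the mode pages in a MODE SELECT parameter list. With
 * change == false the pages are only validated; with change == true they
 * are applied. The caller runs both passes so that no change is made if
 * any page is invalid.
 */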
static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    while (len > 0) {
        int page, page_len;

        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            goto invalid_param;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            ufs_scsi_apply_mode_select(lu, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    int hdr_len = 8;
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0. */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = lduw_be_p(&p[6]);
    if (bd_len != 0) {
        goto invalid_param;
    }

    len -= hdr_len;
    p += hdr_len;

    /* Ensure no change is made if there is an error! */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }

    if (!blk_enable_write_cache(lu->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* block_num and nb_blocks expected to be in qdev blocksize */
static inline bool check_lba_range(UfsLu *lu, uint64_t block_num,
                                   uint32_t nb_blocks)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * block. The second line tests that the last accessed block is in
     * range.
     *
     * Careful, the computations should not underflow for nb_blocks == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (block_num <= block_num + nb_blocks &&
            block_num + nb_blocks <= lu->qdev.max_lba + 1);
}

static void ufs_scsi_emulate_write_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_ufs_scsi_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE. */
        ufs_scsi_emulate_mode_select(r, r->iov.iov_base);
        break;
    default:
        abort();
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *ufs_scsi_get_buf(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev);
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(lu->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
        trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len);
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
        trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len);
        if (!blk_is_writable(lu->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(lu, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}

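/*
 * Complete a write request. If FUA (Force Unit Access) emulation is
 * needed, a flush is issued before the request is reported as GOOD.
 */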
static void scsi_write_do_fua(UfsSCSIReq *r)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    UfsSCSIReq *r = opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

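/*
 * Prepare r->qiov for the next chunk of the transfer: allocate the bounce
 * buffer on first use and cap the I/O vector at the remaining sector count.
 */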
static void scsi_init_iovec(UfsSCSIReq *r, size_t size)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_read_complete_noio(UfsSCSIReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    trace_ufs_scsi_read_data_count(r->sector_count);
    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
        trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

/* Actually issue a read to the block device. */
static void scsi_do_read(UfsSCSIReq *r, int ret)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(
            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r,
            scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    bool first;

    trace_ufs_scsi_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE. */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_ufs_scsi_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

static void scsi_write_complete_noio(UfsSCSIReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (ufs_scsi_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    UfsSCSIReq *r = (UfsSCSIReq *)opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(lu->qdev.conf.blk));
}

static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                                   BlockCompletionFunc *cb, void *cb_opaque,
                                   void *opaque)
{
    UfsSCSIReq *r = opaque;
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);
    return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_write_data(SCSIRequest *req)
{
    UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req);
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev);

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_ufs_scsi_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time. Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.sg) {
        dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg,
                       BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(
            blk_get_aio_context(lu->qdev.conf.blk), r->req.sg,
            r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r,
            scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

static const SCSIReqOps ufs_scsi_emulate_reqops = {
    .size = sizeof(UfsSCSIReq),
    .free_req = ufs_scsi_free_request,
    .send_command = ufs_scsi_emulate_command,
    .read_data = ufs_scsi_emulate_read_data,
    .write_data = ufs_scsi_emulate_write_data,
    .get_buf = ufs_scsi_get_buf,
};

static const SCSIReqOps ufs_scsi_dma_reqops = {
    .size = sizeof(UfsSCSIReq),
    .free_req = ufs_scsi_free_request,
    .send_command = ufs_scsi_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = ufs_scsi_get_buf,
};

/*
 * The following commands are not yet supported:
 * PRE_FETCH(10),
 * UNMAP,
 * WRITE_BUFFER, READ_BUFFER,
 * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT
 */
static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops,
    [INQUIRY] = &ufs_scsi_emulate_reqops,
    [MODE_SENSE_10] = &ufs_scsi_emulate_reqops,
    [START_STOP] = &ufs_scsi_emulate_reqops,
    [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops,
    [REQUEST_SENSE] = &ufs_scsi_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops,
    [MODE_SELECT_10] = &ufs_scsi_emulate_reqops,
    [VERIFY_10] = &ufs_scsi_emulate_reqops,
    [FORMAT_UNIT] = &ufs_scsi_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops,
    [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops,

    [READ_6] = &ufs_scsi_dma_reqops,
    [READ_10] = &ufs_scsi_dma_reqops,
    [WRITE_6] = &ufs_scsi_dma_reqops,
    [WRITE_10] = &ufs_scsi_dma_reqops,
};

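/*
 * Opcodes without an entry in the dispatch table above fall back to
 * ufs_scsi_emulate_reqops, whose command handler reports INVALID_OPCODE.
 */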
static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag,
                                     uint32_t lun, uint8_t *buf,
                                     void *hba_private)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = ufs_scsi_reqops_dispatch[command];
    if (!ops) {
        ops = &ufs_scsi_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private);

    return req;
}

static Property ufs_lu_props[] = {
    DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk),
    DEFINE_PROP_END_OF_LIST(),
};

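/*
 * Illustrative command-line usage (a sketch; it assumes the controller and
 * logical unit are registered as "ufs" and "ufs-lu", the type names used by
 * the QEMU UFS subsystem):
 *
 *   -drive file=disk.img,if=none,id=drive0
 *   -device ufs,id=ufs0,serial=foo
 *   -device ufs-lu,drive=drive0,bus=ufs0,lun=0
 */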
static bool ufs_lu_brdv_init(UfsLu *lu, Error **errp)
{
    SCSIDevice *dev = &lu->qdev;
    bool read_only;

    if (!lu->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return false;
    }

    if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
        return false;
    }

    if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() &&
        !lu->qdev.hba_supports_iothread) {
        error_setg(errp, "HBA does not support iothreads");
        return false;
    }

    read_only = !blk_supports_write_perm(lu->qdev.conf.blk);

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return false;
    }

    if (blk_is_sg(lu->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return false;
    }

    blk_iostatus_enable(lu->qdev.conf.blk);
    return true;
}

static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp)
{
    BlockBackend *blk = lu->qdev.conf.blk;
    int64_t brdv_len = blk_getlength(blk);
    uint64_t raw_dev_cap =
        be64_to_cpu(u->geometry_desc.total_raw_device_capacity);

    if (u->device_desc.number_lu >= UFS_MAX_LUS) {
        error_setg(errp, "ufs host controller has too many logical units.");
        return false;
    }

    if (u->lus[lu->lun] != NULL) {
        error_setg(errp, "ufs logical unit %d already exists.", lu->lun);
        return false;
    }

    u->lus[lu->lun] = lu;
    u->device_desc.number_lu++;
    raw_dev_cap += (brdv_len >> UFS_GEOMETRY_CAPACITY_SHIFT);
    u->geometry_desc.total_raw_device_capacity = cpu_to_be64(raw_dev_cap);
    return true;
}

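/* Integer base-2 logarithm (floor); e.g. ufs_log2(4096) == 12. */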
static inline uint8_t ufs_log2(uint64_t input)
{
    int log = 0;
    while (input >>= 1) {
        log++;
    }
    return log;
}

static void ufs_init_lu(UfsLu *lu)
{
    BlockBackend *blk = lu->qdev.conf.blk;
    int64_t brdv_len = blk_getlength(blk);

    lu->lun = lu->qdev.lun;
    memset(&lu->unit_desc, 0, sizeof(lu->unit_desc));
    lu->unit_desc.length = sizeof(UnitDescriptor);
    lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT;
    lu->unit_desc.lu_enable = 0x01;
    lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize);
    lu->unit_desc.unit_index = lu->qdev.lun;
    lu->unit_desc.logical_block_count =
        cpu_to_be64(brdv_len / (1 << lu->unit_desc.logical_block_size));
}

static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp)
{
    if (!lu->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return false;
    }

    if (lu->qdev.channel != 0) {
        error_setg(errp, "ufs logical unit does not support channel");
        return false;
    }

    if (lu->qdev.lun >= UFS_MAX_LUS) {
        error_setg(errp, "lun must be between 0 and %d", UFS_MAX_LUS - 1);
        return false;
    }

    return true;
}

static void ufs_lu_realize(SCSIDevice *dev, Error **errp)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);
    BusState *s = qdev_get_parent_bus(&dev->qdev);
    UfsHc *u = UFS(s->parent);
    AioContext *ctx = NULL;
    uint64_t nb_sectors, nb_blocks;

    if (!ufs_lu_check_constraints(lu, errp)) {
        return;
    }

    if (lu->qdev.conf.blk) {
        ctx = blk_get_aio_context(lu->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&lu->qdev.conf, errp)) {
            goto out;
        }
    }
    lu->qdev.blocksize = UFS_BLOCK_SIZE;
    blk_get_geometry(lu->qdev.conf.blk, &nb_sectors);
    nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE);
    if (nb_blocks > UINT32_MAX) {
        nb_blocks = UINT32_MAX;
    }
    lu->qdev.max_lba = nb_blocks;
    lu->qdev.type = TYPE_DISK;

    ufs_init_lu(lu);
    if (!ufs_add_lu(u, lu, errp)) {
        goto out;
    }

    ufs_lu_brdv_init(lu, errp);
out:
    if (ctx) {
        aio_context_release(ctx);
    }
}

static void ufs_lu_unrealize(SCSIDevice *dev)
{
    UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev);

    blk_drain(lu->qdev.conf.blk);
}

static void ufs_wlu_realize(DeviceState *qdev, Error **errp)
{
    UfsWLu *wlu = UFSWLU(qdev);
    SCSIDevice *dev = &wlu->qdev;

    if (!is_wlun(dev->lun)) {
        error_setg(errp, "not well-known logical unit number");
        return;
    }

    QTAILQ_INIT(&dev->requests);
}

static void ufs_lu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);

    sc->realize = ufs_lu_realize;
    sc->unrealize = ufs_lu_unrealize;
    sc->alloc_req = scsi_new_request;
    dc->bus_type = TYPE_UFS_BUS;
    device_class_set_props(dc, ufs_lu_props);
    dc->desc = "Virtual UFS logical unit";
}

static void ufs_wlu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc);

    /*
     * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault
     * if a block drive does not exist. Define a new realize function for
     * well-known LUs that do not have a block drive.
     */
    dc->realize = ufs_wlu_realize;
    sc->alloc_req = scsi_new_request;
    dc->bus_type = TYPE_UFS_BUS;
    dc->desc = "Virtual UFS well-known logical unit";
}

static const TypeInfo ufs_lu_info = {
    .name = TYPE_UFS_LU,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = ufs_lu_class_init,
    .instance_size = sizeof(UfsLu),
};

static const TypeInfo ufs_wlu_info = {
    .name = TYPE_UFS_WLU,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = ufs_wlu_class_init,
    .instance_size = sizeof(UfsWLu),
};

static void ufs_lu_register_types(void)
{
    type_register_static(&ufs_lu_info);
    type_register_static(&ufs_wlu_info);
}

type_init(ufs_lu_register_types)