hw/arm/xlnx-zynqmp: Remove 'hw/arm/boot.h' from header
[qemu/ar7.git] / hw / ufs / ufs.c
blob2e6d582cc3f4db4637f6cc1aa312e42a1fd336ac
1 /*
2 * QEMU Universal Flash Storage (UFS) Controller
4 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
6 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
8 * SPDX-License-Identifier: GPL-2.0-or-later
9 */
11 /**
12 * Reference Specs: https://www.jedec.org/, 3.1
14 * Usage
15 * -----
17 * Add options:
18 * -drive file=<file>,if=none,id=<drive_id>
19 * -device ufs,serial=<serial>,id=<bus_name>, \
20 * nutrs=<N[optional]>,nutmrs=<N[optional]>
21 * -device ufs-lu,drive=<drive_id>,bus=<bus_name>
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "trace.h"
28 #include "ufs.h"
30 /* The QEMU-UFS device follows spec version 3.1 */
31 #define UFS_SPEC_VER 0x0310
32 #define UFS_MAX_NUTRS 32
33 #define UFS_MAX_NUTMRS 8
35 static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size)
37 hwaddr hi = addr + size - 1;
39 if (hi < addr) {
40 return MEMTX_DECODE_ERROR;
43 if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
44 return MEMTX_DECODE_ERROR;
47 return pci_dma_read(PCI_DEVICE(u), addr, buf, size);
50 static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf,
51 int size)
53 hwaddr hi = addr + size - 1;
54 if (hi < addr) {
55 return MEMTX_DECODE_ERROR;
58 if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) {
59 return MEMTX_DECODE_ERROR;
62 return pci_dma_write(PCI_DEVICE(u), addr, buf, size);
65 static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result);
67 static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot)
69 hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba;
70 hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc);
72 return utrd_addr;
75 static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd)
77 uint32_t cmd_desc_base_addr_lo =
78 le32_to_cpu(utrd->command_desc_base_addr_lo);
79 uint32_t cmd_desc_base_addr_hi =
80 le32_to_cpu(utrd->command_desc_base_addr_hi);
82 return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo;
85 static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd)
87 hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd);
88 uint32_t rsp_upiu_byte_off =
89 le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t);
90 return req_upiu_base_addr + rsp_upiu_byte_off;
/* Fetch the UTRD for req->slot from guest memory into req->utrd. */
static MemTxResult ufs_dma_read_utrd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
    MemTxResult ret;

    ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd));
    if (ret) {
        trace_ufs_err_dma_read_utrd(req->slot, utrd_addr);
    }
    return ret;
}
106 static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req)
108 UfsHc *u = req->hc;
109 hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
110 UtpUpiuReq *req_upiu = &req->req_upiu;
111 uint32_t copy_size;
112 uint16_t data_segment_length;
113 MemTxResult ret;
116 * To know the size of the req_upiu, we need to read the
117 * data_segment_length in the header first.
119 ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header,
120 sizeof(UtpUpiuHeader));
121 if (ret) {
122 trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
123 return ret;
125 data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length);
127 copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
128 data_segment_length;
130 ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size);
131 if (ret) {
132 trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr);
134 return ret;
137 static MemTxResult ufs_dma_read_prdt(UfsRequest *req)
139 UfsHc *u = req->hc;
140 uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length);
141 uint16_t prdt_byte_off =
142 le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t);
143 uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry);
144 g_autofree UfshcdSgEntry *prd_entries = NULL;
145 hwaddr req_upiu_base_addr, prdt_base_addr;
146 int err;
148 assert(!req->sg);
150 if (prdt_size == 0) {
151 return MEMTX_OK;
153 prd_entries = g_new(UfshcdSgEntry, prdt_size);
155 req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd);
156 prdt_base_addr = req_upiu_base_addr + prdt_byte_off;
158 err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size);
159 if (err) {
160 trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr);
161 return err;
164 req->sg = g_malloc0(sizeof(QEMUSGList));
165 pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len);
167 for (uint16_t i = 0; i < prdt_len; ++i) {
168 hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr);
169 uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1;
170 qemu_sglist_add(req->sg, data_dma_addr, data_byte_count);
172 return MEMTX_OK;
175 static MemTxResult ufs_dma_read_upiu(UfsRequest *req)
177 MemTxResult ret;
179 ret = ufs_dma_read_utrd(req);
180 if (ret) {
181 return ret;
184 ret = ufs_dma_read_req_upiu(req);
185 if (ret) {
186 return ret;
189 ret = ufs_dma_read_prdt(req);
190 if (ret) {
191 return ret;
194 return 0;
/* Write req->utrd back to its slot in the guest's transfer request list. */
static MemTxResult ufs_dma_write_utrd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot);
    MemTxResult ret;

    ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd));
    if (ret) {
        trace_ufs_err_dma_write_utrd(req->slot, utrd_addr);
    }
    return ret;
}
210 static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req)
212 UfsHc *u = req->hc;
213 hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd);
214 uint32_t rsp_upiu_byte_len =
215 le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t);
216 uint16_t data_segment_length =
217 be16_to_cpu(req->rsp_upiu.header.data_segment_length);
218 uint32_t copy_size = sizeof(UtpUpiuHeader) +
219 UFS_TRANSACTION_SPECIFIC_FIELD_SIZE +
220 data_segment_length;
221 MemTxResult ret;
223 if (copy_size > rsp_upiu_byte_len) {
224 copy_size = rsp_upiu_byte_len;
227 ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size);
228 if (ret) {
229 trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr);
231 return ret;
234 static MemTxResult ufs_dma_write_upiu(UfsRequest *req)
236 MemTxResult ret;
238 ret = ufs_dma_write_rsp_upiu(req);
239 if (ret) {
240 return ret;
243 return ufs_dma_write_utrd(req);
/*
 * Update the PCI interrupt line: asserted while any enabled (IE)
 * interrupt status (IS) bit is pending, deasserted otherwise.
 */
static void ufs_irq_check(UfsHc *u)
{
    PCIDevice *pci = PCI_DEVICE(u);

    if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) {
        trace_ufs_irq_raise();
        pci_irq_assert(pci);
    } else {
        trace_ufs_irq_lower();
        pci_irq_deassert(pci);
    }
}
/*
 * Handle a write to the UTRLDBR doorbell register: mark every newly
 * rung, idle slot READY and schedule the bottom half that executes
 * them. Processing stops early (without scheduling remaining slots) if
 * a rung slot is in the ERROR state or still busy.
 */
static void ufs_process_db(UfsHc *u, uint32_t val)
{
    DECLARE_BITMAP(doorbell, UFS_MAX_NUTRS);
    uint32_t slot;
    uint32_t nutrs = u->params.nutrs;
    UfsRequest *req;

    /* Only consider doorbell bits that are not already latched. */
    val &= ~u->reg.utrldbr;
    if (!val) {
        return;
    }

    doorbell[0] = val;
    slot = find_first_bit(doorbell, nutrs);

    while (slot < nutrs) {
        req = &u->req_list[slot];
        if (req->state == UFS_REQUEST_ERROR) {
            trace_ufs_err_utrl_slot_error(req->slot);
            return;
        }

        if (req->state != UFS_REQUEST_IDLE) {
            trace_ufs_err_utrl_slot_busy(req->slot);
            return;
        }

        trace_ufs_process_db(slot);
        req->state = UFS_REQUEST_READY;
        slot = find_next_bit(doorbell, nutrs, slot + 1);
    }

    /* Actual execution happens in the bottom half (doorbell_bh). */
    qemu_bh_schedule(u->doorbell_bh);
}
/*
 * Handle a write to the UICCMD register.
 * Only the essential uic commands for running drivers on Linux and Windows
 * are implemented.
 */
static void ufs_process_uiccmd(UfsHc *u, uint32_t val)
{
    trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2,
                             u->reg.ucmdarg3);
    switch (val) {
    case UFS_UIC_CMD_DME_LINK_STARTUP:
        /* Link-up: report device present and both request lists ready. */
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    /* TODO: Revisit it when Power Management is implemented */
    case UFS_UIC_CMD_DME_HIBER_ENTER:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    case UFS_UIC_CMD_DME_HIBER_EXIT:
        u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1);
        u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL);
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS;
        break;
    default:
        u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE;
    }

    /* Every UIC command sets completion status and may raise an IRQ. */
    u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1);

    ufs_irq_check(u);
}
/* Handle a guest write of @data to the UFSHCI register at @offset. */
static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size)
{
    switch (offset) {
    case A_IS:
        /* Interrupt status is write-1-to-clear. */
        u->reg.is &= ~data;
        ufs_irq_check(u);
        break;
    case A_IE:
        u->reg.ie = data;
        ufs_irq_check(u);
        break;
    case A_HCE:
        if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) {
            /* 0 -> 1: enable the controller; become UIC-command-ready. */
            u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1);
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1);
        } else if (FIELD_EX32(u->reg.hce, HCE, HCE) &&
                   !FIELD_EX32(data, HCE, HCE)) {
            /* 1 -> 0: disable; all host controller status is cleared. */
            u->reg.hcs = 0;
            u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0);
        }
        break;
    case A_UTRLBA:
        u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK;
        break;
    case A_UTRLBAU:
        u->reg.utrlbau = data;
        break;
    case A_UTRLDBR:
        /* Kick off newly rung doorbell slots, then latch the bits. */
        ufs_process_db(u, data);
        u->reg.utrldbr |= data;
        break;
    case A_UTRLRSR:
        u->reg.utrlrsr = data;
        break;
    case A_UTRLCNR:
        /* Completion notification is write-1-to-clear. */
        u->reg.utrlcnr &= ~data;
        break;
    case A_UTMRLBA:
        u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK;
        break;
    case A_UTMRLBAU:
        u->reg.utmrlbau = data;
        break;
    case A_UICCMD:
        ufs_process_uiccmd(u, data);
        break;
    case A_UCMDARG1:
        u->reg.ucmdarg1 = data;
        break;
    case A_UCMDARG2:
        u->reg.ucmdarg2 = data;
        break;
    case A_UCMDARG3:
        u->reg.ucmdarg3 = data;
        break;
    case A_UTRLCLR:
    case A_UTMRLDBR:
    case A_UTMRLCLR:
    case A_UTMRLRSR:
        /* Task management and list-clear registers are not implemented. */
        trace_ufs_err_unsupport_register_offset(offset);
        break;
    default:
        trace_ufs_err_invalid_register_offset(offset);
        break;
    }
}
/*
 * MMIO read handler: registers are mirrored from the in-memory u->reg
 * block. Out-of-range reads return 0.
 */
static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;
    uint8_t *ptr = (uint8_t *)&u->reg;
    uint64_t value;

    if (addr > sizeof(u->reg) - size) {
        trace_ufs_err_invalid_register_offset(addr);
        return 0;
    }

    /* ufs_mmio_ops.impl restricts accesses to aligned 32-bit words. */
    value = *(uint32_t *)(ptr + addr);
    trace_ufs_mmio_read(addr, value, size);
    return value;
}
/* MMIO write handler: bounds-check, then dispatch to ufs_write_reg(). */
static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    UfsHc *u = (UfsHc *)opaque;

    if (addr > sizeof(u->reg) - size) {
        trace_ufs_err_invalid_register_offset(addr);
        return;
    }

    trace_ufs_mmio_write(addr, data, size);
    ufs_write_reg(u, addr, data, size);
}
/* UFSHCI register space: little-endian, accessed as aligned 32-bit words. */
static const MemoryRegionOps ufs_mmio_ops = {
    .read = ufs_mmio_read,
    .write = ufs_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
436 static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req)
438 UfsRequest *req = scsi_req->hba_private;
439 return req->sg;
442 static void ufs_build_upiu_sense_data(UfsRequest *req, SCSIRequest *scsi_req)
444 req->rsp_upiu.sr.sense_data_len = cpu_to_be16(scsi_req->sense_len);
445 assert(scsi_req->sense_len <= SCSI_SENSE_LEN);
446 memcpy(req->rsp_upiu.sr.sense_data, scsi_req->sense, scsi_req->sense_len);
449 static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type,
450 uint8_t flags, uint8_t response,
451 uint8_t scsi_status,
452 uint16_t data_segment_length)
454 memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader));
455 req->rsp_upiu.header.trans_type = trans_type;
456 req->rsp_upiu.header.flags = flags;
457 req->rsp_upiu.header.response = response;
458 req->rsp_upiu.header.scsi_status = scsi_status;
459 req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length);
462 static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid)
464 UfsRequest *req = scsi_req->hba_private;
465 int16_t status = scsi_req->status;
466 uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len);
467 uint32_t transfered_len = scsi_req->cmd.xfer - resid;
468 uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCESS;
469 uint16_t data_segment_length;
471 if (expected_len > transfered_len) {
472 req->rsp_upiu.sr.residual_transfer_count =
473 cpu_to_be32(expected_len - transfered_len);
474 flags |= UFS_UPIU_FLAG_UNDERFLOW;
475 } else if (expected_len < transfered_len) {
476 req->rsp_upiu.sr.residual_transfer_count =
477 cpu_to_be32(transfered_len - expected_len);
478 flags |= UFS_UPIU_FLAG_OVERFLOW;
481 if (status != 0) {
482 ufs_build_upiu_sense_data(req, scsi_req);
483 response = UFS_COMMAND_RESULT_FAIL;
486 data_segment_length = cpu_to_be16(scsi_req->sense_len +
487 sizeof(req->rsp_upiu.sr.sense_data_len));
488 ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response,
489 status, data_segment_length);
491 ufs_complete_req(req, UFS_REQUEST_SUCCESS);
493 scsi_req->hba_private = NULL;
494 scsi_req_unref(scsi_req);
/* HBA callbacks for the UFS SCSI bus: single target/channel, tagged queuing. */
static const struct SCSIBusInfo ufs_scsi_info = {
    .tcq = true,
    .max_target = 0,
    .max_lun = UFS_MAX_LUS,
    .max_channel = 0,

    .get_sg_list = ufs_get_sg_list,
    .complete = ufs_scsi_command_complete,
};
/*
 * Dispatch a COMMAND UPIU to the SCSI layer.
 *
 * Ordinary LUNs are validated against the device descriptor and the
 * attached LU table; well-known LUNs map to the dedicated w-lu devices.
 * Returns UFS_REQUEST_NO_COMPLETE because completion happens in the
 * ufs_scsi_command_complete() callback.
 */
static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t lun = req->req_upiu.header.lun;
    uint8_t task_tag = req->req_upiu.header.task_tag;
    SCSIDevice *dev = NULL;

    trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]);

    if (!is_wlun(lun)) {
        if (lun >= u->device_desc.number_lu) {
            trace_ufs_err_scsi_cmd_invalid_lun(lun);
            return UFS_REQUEST_FAIL;
        } else if (u->lus[lun] == NULL) {
            trace_ufs_err_scsi_cmd_invalid_lun(lun);
            return UFS_REQUEST_FAIL;
        }
    }

    /*
     * NOTE(review): the w-lu pointers (report_wlu, dev_wlu, boot_wlu,
     * rpmb_wlu) are dereferenced without a NULL check here — presumably
     * they are always created at realize time; confirm against the
     * device init code.
     */
    switch (lun) {
    case UFS_UPIU_REPORT_LUNS_WLUN:
        dev = &u->report_wlu->qdev;
        break;
    case UFS_UPIU_UFS_DEVICE_WLUN:
        dev = &u->dev_wlu->qdev;
        break;
    case UFS_UPIU_BOOT_WLUN:
        dev = &u->boot_wlu->qdev;
        break;
    case UFS_UPIU_RPMB_WLUN:
        dev = &u->rpmb_wlu->qdev;
        break;
    default:
        dev = &u->lus[lun]->qdev;
    }

    SCSIRequest *scsi_req = scsi_req_new(
        dev, task_tag, lun, req->req_upiu.sc.cdb, UFS_CDB_SIZE, req);

    uint32_t len = scsi_req_enqueue(scsi_req);
    if (len) {
        scsi_req_continue(scsi_req);
    }

    return UFS_REQUEST_NO_COMPLETE;
}
/* NOP OUT handling: reply immediately with a NOP IN response UPIU. */
static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req)
{
    trace_ufs_exec_nop_cmd(req->slot);
    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0);
    return UFS_REQUEST_SUCCESS;
}
/*
 * This defines the permission of flags based on their IDN. There are some
 * things that are declared read-only, which is inconsistent with the ufs spec,
 * because we want to return an error for features that are not yet supported.
 */
static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = {
    [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET,
    /* Write protection is not supported */
    [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET |
                                    UFS_QUERY_FLAG_CLEAR |
                                    UFS_QUERY_FLAG_TOGGLE,
    [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] =
        UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR |
        UFS_QUERY_FLAG_TOGGLE,
    /* Purge Operation is not supported */
    [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Refresh Operation is not supported */
    [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE,
    /* Physical Resource Removal is not supported */
    [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ,
    /* Write Booster is not supported */
    [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ,
    [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ,
};
591 static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op)
593 if (idn >= UFS_QUERY_FLAG_IDN_COUNT) {
594 return UFS_QUERY_RESULT_INVALID_IDN;
597 if (!(flag_permission[idn] & op)) {
598 if (op == UFS_QUERY_FLAG_READ) {
599 trace_ufs_err_query_flag_not_readable(idn);
600 return UFS_QUERY_RESULT_NOT_READABLE;
602 trace_ufs_err_query_flag_not_writable(idn);
603 return UFS_QUERY_RESULT_NOT_WRITEABLE;
606 return UFS_QUERY_RESULT_SUCCESS;
/*
 * Per-IDN permission table for query attributes; unsupported features
 * are marked read-only so writes to them fail cleanly.
 */
static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = {
    /* booting is not supported */
    [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_CONTROL] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] =
        UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE,
    [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ,
    /* refresh operation is not supported */
    [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ,
    [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ,
};
652 static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op)
654 if (idn >= UFS_QUERY_ATTR_IDN_COUNT) {
655 return UFS_QUERY_RESULT_INVALID_IDN;
658 if (!(attr_permission[idn] & op)) {
659 if (op == UFS_QUERY_ATTR_READ) {
660 trace_ufs_err_query_attr_not_readable(idn);
661 return UFS_QUERY_RESULT_NOT_READABLE;
663 trace_ufs_err_query_attr_not_writable(idn);
664 return UFS_QUERY_RESULT_NOT_WRITEABLE;
667 return UFS_QUERY_RESULT_SUCCESS;
/*
 * Execute a query-flag operation (@op: read/set/clear/toggle) and place
 * the resulting flag value in the response UPIU.
 */
static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op)
{
    UfsHc *u = req->hc;
    uint8_t idn = req->req_upiu.qr.idn;
    uint32_t value;
    QueryRespCode ret;

    ret = ufs_flag_check_idn_valid(idn, op);
    if (ret) {
        return ret;
    }

    /* fDeviceInit always reads back as 0: initialization is instantaneous. */
    if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_READ) {
        value = *(((uint8_t *)&u->flags) + idn);
    } else if (op == UFS_QUERY_FLAG_SET) {
        value = 1;
    } else if (op == UFS_QUERY_FLAG_CLEAR) {
        value = 0;
    } else if (op == UFS_QUERY_FLAG_TOGGLE) {
        value = *(((uint8_t *)&u->flags) + idn);
        value = !value;
    } else {
        trace_ufs_err_query_invalid_opcode(op);
        return UFS_QUERY_RESULT_INVALID_OPCODE;
    }

    /* Flags live as consecutive bytes in u->flags, indexed by IDN. */
    *(((uint8_t *)&u->flags) + idn) = value;
    req->rsp_upiu.qr.value = cpu_to_be32(value);
    return UFS_QUERY_RESULT_SUCCESS;
}
/*
 * Return the current value of attribute @idn in host byte order.
 * Multi-byte attributes are stored big-endian in u->attributes and
 * converted here; unknown IDNs read as 0.
 */
static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn)
{
    switch (idn) {
    case UFS_QUERY_ATTR_IDN_BOOT_LU_EN:
        return u->attributes.boot_lun_en;
    case UFS_QUERY_ATTR_IDN_POWER_MODE:
        return u->attributes.current_power_mode;
    case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
        return u->attributes.active_icc_level;
    case UFS_QUERY_ATTR_IDN_OOO_DATA_EN:
        return u->attributes.out_of_order_data_en;
    case UFS_QUERY_ATTR_IDN_BKOPS_STATUS:
        return u->attributes.background_op_status;
    case UFS_QUERY_ATTR_IDN_PURGE_STATUS:
        return u->attributes.purge_status;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
        return u->attributes.max_data_in_size;
    case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
        return u->attributes.max_data_out_size;
    case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED:
        return be32_to_cpu(u->attributes.dyn_cap_needed);
    case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
        return u->attributes.ref_clk_freq;
    case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK:
        return u->attributes.config_descr_lock;
    case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
        return u->attributes.max_num_of_rtt;
    case UFS_QUERY_ATTR_IDN_EE_CONTROL:
        return be16_to_cpu(u->attributes.exception_event_control);
    case UFS_QUERY_ATTR_IDN_EE_STATUS:
        return be16_to_cpu(u->attributes.exception_event_status);
    case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
        return be32_to_cpu(u->attributes.seconds_passed);
    case UFS_QUERY_ATTR_IDN_CNTX_CONF:
        return be16_to_cpu(u->attributes.context_conf);
    case UFS_QUERY_ATTR_IDN_FFU_STATUS:
        return u->attributes.device_ffu_status;
    case UFS_QUERY_ATTR_IDN_PSA_STATE:
        return be32_to_cpu(u->attributes.psa_state);
    case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
        return be32_to_cpu(u->attributes.psa_data_size);
    case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME:
        return u->attributes.ref_clk_gating_wait_time;
    case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP:
        return u->attributes.device_case_rough_temperaure;
    case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND:
        return u->attributes.device_too_high_temp_boundary;
    case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND:
        return u->attributes.device_too_low_temp_boundary;
    case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS:
        return u->attributes.throttling_status;
    case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS:
        return u->attributes.wb_buffer_flush_status;
    case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE:
        return u->attributes.available_wb_buffer_size;
    case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST:
        return u->attributes.wb_buffer_life_time_est;
    case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE:
        return be32_to_cpu(u->attributes.current_wb_buffer_size);
    case UFS_QUERY_ATTR_IDN_REFRESH_STATUS:
        return u->attributes.refresh_status;
    case UFS_QUERY_ATTR_IDN_REFRESH_FREQ:
        return u->attributes.refresh_freq;
    case UFS_QUERY_ATTR_IDN_REFRESH_UNIT:
        return u->attributes.refresh_unit;
    }
    return 0;
}
772 static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value)
774 switch (idn) {
775 case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
776 u->attributes.active_icc_level = value;
777 break;
778 case UFS_QUERY_ATTR_IDN_MAX_DATA_IN:
779 u->attributes.max_data_in_size = value;
780 break;
781 case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT:
782 u->attributes.max_data_out_size = value;
783 break;
784 case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ:
785 u->attributes.ref_clk_freq = value;
786 break;
787 case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
788 u->attributes.max_num_of_rtt = value;
789 break;
790 case UFS_QUERY_ATTR_IDN_EE_CONTROL:
791 u->attributes.exception_event_control = cpu_to_be16(value);
792 break;
793 case UFS_QUERY_ATTR_IDN_SECONDS_PASSED:
794 u->attributes.seconds_passed = cpu_to_be32(value);
795 break;
796 case UFS_QUERY_ATTR_IDN_PSA_STATE:
797 u->attributes.psa_state = value;
798 break;
799 case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE:
800 u->attributes.psa_data_size = cpu_to_be32(value);
801 break;
/*
 * Execute a query-attribute read or write and echo the effective value
 * back in the response UPIU.
 */
static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op)
{
    UfsHc *u = req->hc;
    uint8_t idn = req->req_upiu.qr.idn;
    uint32_t value;
    QueryRespCode ret;

    ret = ufs_attr_check_idn_valid(idn, op);
    if (ret) {
        return ret;
    }

    if (op == UFS_QUERY_ATTR_READ) {
        value = ufs_read_attr_value(u, idn);
    } else {
        value = be32_to_cpu(req->req_upiu.qr.value);
        ufs_write_attr_value(u, idn, value);
    }

    req->rsp_upiu.qr.value = cpu_to_be32(value);
    return UFS_QUERY_RESULT_SUCCESS;
}
/* Unit descriptor for the RPMB well-known LU (present but disabled). */
static const RpmbUnitDescriptor rpmb_unit_desc = {
    .length = sizeof(RpmbUnitDescriptor),
    .descriptor_idn = 2,
    .unit_index = UFS_UPIU_RPMB_WLUN,
    .lu_enable = 0,
};
835 static QueryRespCode ufs_read_unit_desc(UfsRequest *req)
837 UfsHc *u = req->hc;
838 uint8_t lun = req->req_upiu.qr.index;
840 if (lun != UFS_UPIU_RPMB_WLUN &&
841 (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) {
842 trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun);
843 return UFS_QUERY_RESULT_INVALID_INDEX;
846 if (lun == UFS_UPIU_RPMB_WLUN) {
847 memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length);
848 } else {
849 memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc,
850 sizeof(u->lus[lun]->unit_desc));
853 return UFS_QUERY_RESULT_SUCCESS;
856 static inline StringDescriptor manufacturer_str_desc(void)
858 StringDescriptor desc = {
859 .length = 0x12,
860 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
862 desc.UC[0] = cpu_to_be16('R');
863 desc.UC[1] = cpu_to_be16('E');
864 desc.UC[2] = cpu_to_be16('D');
865 desc.UC[3] = cpu_to_be16('H');
866 desc.UC[4] = cpu_to_be16('A');
867 desc.UC[5] = cpu_to_be16('T');
868 return desc;
871 static inline StringDescriptor product_name_str_desc(void)
873 StringDescriptor desc = {
874 .length = 0x22,
875 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
877 desc.UC[0] = cpu_to_be16('Q');
878 desc.UC[1] = cpu_to_be16('E');
879 desc.UC[2] = cpu_to_be16('M');
880 desc.UC[3] = cpu_to_be16('U');
881 desc.UC[4] = cpu_to_be16(' ');
882 desc.UC[5] = cpu_to_be16('U');
883 desc.UC[6] = cpu_to_be16('F');
884 desc.UC[7] = cpu_to_be16('S');
885 return desc;
888 static inline StringDescriptor product_rev_level_str_desc(void)
890 StringDescriptor desc = {
891 .length = 0x0a,
892 .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
894 desc.UC[0] = cpu_to_be16('0');
895 desc.UC[1] = cpu_to_be16('0');
896 desc.UC[2] = cpu_to_be16('0');
897 desc.UC[3] = cpu_to_be16('1');
898 return desc;
/* Empty string descriptor: length covers only the two header bytes. */
static const StringDescriptor null_str_desc = {
    .length = 0x02,
    .descriptor_idn = UFS_QUERY_DESC_IDN_STRING,
};
/*
 * QUERY READ DESCRIPTOR (string): return the fixed string descriptor
 * matching qr.index against the indices published in the device
 * descriptor. Serial number and OEM id deliberately read back empty.
 */
static QueryRespCode ufs_read_string_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t index = req->req_upiu.qr.index;
    StringDescriptor desc;

    if (index == u->device_desc.manufacturer_name) {
        desc = manufacturer_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.product_name) {
        desc = product_name_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.serial_number) {
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.oem_id) {
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.product_revision_level) {
        desc = product_rev_level_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else {
        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
        return UFS_QUERY_RESULT_INVALID_INDEX;
    }
    return UFS_QUERY_RESULT_SUCCESS;
}
/* Interconnect descriptor: UniPro 1.8 / M-PHY 4.1, BCD-encoded versions. */
static inline InterconnectDescriptor interconnect_desc(void)
{
    InterconnectDescriptor desc = {
        .length = sizeof(InterconnectDescriptor),
        .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
    };
    desc.bcd_unipro_version = cpu_to_be16(0x180);
    desc.bcd_mphy_version = cpu_to_be16(0x410);
    return desc;
}
/*
 * QUERY READ DESCRIPTOR: copy the requested descriptor into the
 * response data area and fill in the echoed query fields. The returned
 * length is the requested length clamped to the descriptor's own
 * length byte (data[0]).
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;

    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        length = 0;
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /*
     * NOTE(review): on error paths data[0] may hold stale bytes from a
     * previous response, but length is forced to 0 for invalid IDNs and
     * sub-handlers return before this on failure — verify no error path
     * reaches here with a nonzero requested length.
     */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }

    req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode;
    req->rsp_upiu.qr.idn = req->req_upiu.qr.idn;
    req->rsp_upiu.qr.index = req->req_upiu.qr.index;
    req->rsp_upiu.qr.selector = req->req_upiu.qr.selector;
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1005 static QueryRespCode ufs_exec_query_read(UfsRequest *req)
1007 QueryRespCode status;
1008 switch (req->req_upiu.qr.opcode) {
1009 case UFS_UPIU_QUERY_OPCODE_NOP:
1010 status = UFS_QUERY_RESULT_SUCCESS;
1011 break;
1012 case UFS_UPIU_QUERY_OPCODE_READ_DESC:
1013 status = ufs_read_desc(req);
1014 break;
1015 case UFS_UPIU_QUERY_OPCODE_READ_ATTR:
1016 status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ);
1017 break;
1018 case UFS_UPIU_QUERY_OPCODE_READ_FLAG:
1019 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ);
1020 break;
1021 default:
1022 trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1023 status = UFS_QUERY_RESULT_INVALID_OPCODE;
1024 break;
1027 return status;
1030 static QueryRespCode ufs_exec_query_write(UfsRequest *req)
1032 QueryRespCode status;
1033 switch (req->req_upiu.qr.opcode) {
1034 case UFS_UPIU_QUERY_OPCODE_NOP:
1035 status = UFS_QUERY_RESULT_SUCCESS;
1036 break;
1037 case UFS_UPIU_QUERY_OPCODE_WRITE_DESC:
1038 /* write descriptor is not supported */
1039 status = UFS_QUERY_RESULT_NOT_WRITEABLE;
1040 break;
1041 case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR:
1042 status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE);
1043 break;
1044 case UFS_UPIU_QUERY_OPCODE_SET_FLAG:
1045 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET);
1046 break;
1047 case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG:
1048 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR);
1049 break;
1050 case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG:
1051 status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE);
1052 break;
1053 default:
1054 trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode);
1055 status = UFS_QUERY_RESULT_INVALID_OPCODE;
1056 break;
1059 return status;
1062 static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
1064 uint8_t query_func = req->req_upiu.header.query_func;
1065 uint16_t data_segment_length;
1066 QueryRespCode status;
1068 trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
1069 if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
1070 status = ufs_exec_query_read(req);
1071 } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
1072 status = ufs_exec_query_write(req);
1073 } else {
1074 status = UFS_QUERY_RESULT_GENERAL_FAILURE;
1077 data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
1078 ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
1079 data_segment_length);
1081 if (status != UFS_QUERY_RESULT_SUCCESS) {
1082 return UFS_REQUEST_FAIL;
1084 return UFS_REQUEST_SUCCESS;
1087 static void ufs_exec_req(UfsRequest *req)
1089 UfsReqResult req_result;
1091 if (ufs_dma_read_upiu(req)) {
1092 return;
1095 switch (req->req_upiu.header.trans_type) {
1096 case UFS_UPIU_TRANSACTION_NOP_OUT:
1097 req_result = ufs_exec_nop_cmd(req);
1098 break;
1099 case UFS_UPIU_TRANSACTION_COMMAND:
1100 req_result = ufs_exec_scsi_cmd(req);
1101 break;
1102 case UFS_UPIU_TRANSACTION_QUERY_REQ:
1103 req_result = ufs_exec_query_cmd(req);
1104 break;
1105 default:
1106 trace_ufs_err_invalid_trans_code(req->slot,
1107 req->req_upiu.header.trans_type);
1108 req_result = UFS_REQUEST_FAIL;
1112 * The ufs_complete_req for scsi commands is handled by the
1113 * ufs_scsi_command_complete() callback function. Therefore, to avoid
1114 * duplicate processing, ufs_complete_req() is not called for scsi commands.
1116 if (req_result != UFS_REQUEST_NO_COMPLETE) {
1117 ufs_complete_req(req, req_result);
1121 static void ufs_process_req(void *opaque)
1123 UfsHc *u = opaque;
1124 UfsRequest *req;
1125 int slot;
1127 for (slot = 0; slot < u->params.nutrs; slot++) {
1128 req = &u->req_list[slot];
1130 if (req->state != UFS_REQUEST_READY) {
1131 continue;
1133 trace_ufs_process_req(slot);
1134 req->state = UFS_REQUEST_RUNNING;
1136 ufs_exec_req(req);
1140 static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
1142 UfsHc *u = req->hc;
1143 assert(req->state == UFS_REQUEST_RUNNING);
1145 if (req_result == UFS_REQUEST_SUCCESS) {
1146 req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
1147 } else {
1148 req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
1151 trace_ufs_complete_req(req->slot);
1152 req->state = UFS_REQUEST_COMPLETE;
1153 qemu_bh_schedule(u->complete_bh);
1156 static void ufs_clear_req(UfsRequest *req)
1158 if (req->sg != NULL) {
1159 qemu_sglist_destroy(req->sg);
1160 g_free(req->sg);
1161 req->sg = NULL;
1164 memset(&req->utrd, 0, sizeof(req->utrd));
1165 memset(&req->req_upiu, 0, sizeof(req->req_upiu));
1166 memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
/*
 * Completion bottom-half: for each completed request, DMA the response
 * UPIU and UTRD back to guest memory, update interrupt/doorbell state,
 * and recycle the slot.
 */
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        /* Write the response back to the guest; on DMA failure the slot
         * is parked in ERROR state and skipped (doorbell is not cleared). */
        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        /* Raise UTRCS for failed commands or those flagged interrupt-on-
         * completion in dword 0 of the UTRD. */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        /* Clear the doorbell bit and set the completion notification bit. */
        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    /* Re-evaluate the interrupt line after all state updates. */
    ufs_irq_check(u);
}
1208 static bool ufs_check_constraints(UfsHc *u, Error **errp)
1210 if (u->params.nutrs > UFS_MAX_NUTRS) {
1211 error_setg(errp, "nutrs must be less than or equal to %d",
1212 UFS_MAX_NUTRS);
1213 return false;
1216 if (u->params.nutmrs > UFS_MAX_NUTMRS) {
1217 error_setg(errp, "nutmrs must be less than or equal to %d",
1218 UFS_MAX_NUTMRS);
1219 return false;
1222 return true;
1225 static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
1227 uint8_t *pci_conf = pci_dev->config;
1229 pci_conf[PCI_INTERRUPT_PIN] = 1;
1230 pci_config_set_prog_interface(pci_conf, 0x1);
1232 memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
1233 u->reg_size);
1234 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
1235 u->irq = pci_allocate_irq(pci_dev);
1238 static void ufs_init_state(UfsHc *u)
1240 u->req_list = g_new0(UfsRequest, u->params.nutrs);
1242 for (int i = 0; i < u->params.nutrs; i++) {
1243 u->req_list[i].hc = u;
1244 u->req_list[i].slot = i;
1245 u->req_list[i].sg = NULL;
1246 u->req_list[i].state = UFS_REQUEST_IDLE;
1249 u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
1250 &DEVICE(u)->mem_reentrancy_guard);
1251 u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
1252 &DEVICE(u)->mem_reentrancy_guard);
/*
 * Initialise the controller register file and the read-only device model
 * data (device descriptor, geometry descriptor, attributes, flags) to
 * their reset defaults.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;

    /* MMIO window size: register file rounded up to a power of two. */
    u->reg_size = pow2ceil(sizeof(UfsReg));

    memset(&u->reg, 0, sizeof(u->reg));
    /* CAP slot counts are encoded as (count - 1). */
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1); /* 64-bit addressing supported */
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    u->reg.cap = cap;
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* Presumably indices into the string descriptor table, per the UFS
     * device descriptor layout — confirm against the descriptor handlers. */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    /* Advertised queue depth matches the number of transfer slots. */
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    /* Firmware update is permanently disabled in this model. */
    u->flags.permanently_disable_fw_update = 1;
}
1320 static bool ufs_init_wlu(UfsHc *u, UfsWLu **wlu, uint8_t wlun, Error **errp)
1322 UfsWLu *new_wlu = UFSWLU(qdev_new(TYPE_UFS_WLU));
1324 qdev_prop_set_uint32(DEVICE(new_wlu), "lun", wlun);
1327 * The well-known lu shares the same bus as the normal lu. If the well-known
1328 * lu writes the same channel value as the normal lu, the report will be
1329 * made not only for the normal lu but also for the well-known lu at
1330 * REPORT_LUN time. To prevent this, the channel value of normal lu is fixed
1331 * to 0 and the channel value of well-known lu is fixed to 1.
1333 qdev_prop_set_uint32(DEVICE(new_wlu), "channel", 1);
1334 if (!qdev_realize_and_unref(DEVICE(new_wlu), BUS(&u->bus), errp)) {
1335 return false;
1338 *wlu = new_wlu;
1339 return true;
/*
 * PCI realize hook: validate configuration, create the UFS bus, set up
 * internal state, registers and the PCI interface, then create the four
 * mandatory well-known LUs (REPORT LUNS, UFS device, BOOT, RPMB).
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    /* The UFS bus carries both normal (ufs-lu) and well-known LUs. */
    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);
    u->bus.parent_bus.info = &ufs_scsi_info;

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    /*
     * NOTE(review): if a ufs_init_wlu() call below fails, state created
     * above (req_list, BHs, earlier WLUs) is not torn down here — confirm
     * the qdev core cleans this up on failed realize, or add explicit
     * rollback.
     */
    if (!ufs_init_wlu(u, &u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN, errp)) {
        return;
    }

    if (!ufs_init_wlu(u, &u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN, errp)) {
        return;
    }

    if (!ufs_init_wlu(u, &u->boot_wlu, UFS_UPIU_BOOT_WLUN, errp)) {
        return;
    }

    if (!ufs_init_wlu(u, &u->rpmb_wlu, UFS_UPIU_RPMB_WLUN, errp)) {
        return;
    }
}
1375 static void ufs_exit(PCIDevice *pci_dev)
1377 UfsHc *u = UFS(pci_dev);
1379 if (u->dev_wlu) {
1380 object_unref(OBJECT(u->dev_wlu));
1381 u->dev_wlu = NULL;
1384 if (u->report_wlu) {
1385 object_unref(OBJECT(u->report_wlu));
1386 u->report_wlu = NULL;
1389 if (u->rpmb_wlu) {
1390 object_unref(OBJECT(u->rpmb_wlu));
1391 u->rpmb_wlu = NULL;
1394 if (u->boot_wlu) {
1395 object_unref(OBJECT(u->boot_wlu));
1396 u->boot_wlu = NULL;
1399 qemu_bh_delete(u->doorbell_bh);
1400 qemu_bh_delete(u->complete_bh);
1402 for (int i = 0; i < u->params.nutrs; i++) {
1403 ufs_clear_req(&u->req_list[i]);
1405 g_free(u->req_list);
/* User-settable device properties (-device ufs,...). */
static Property ufs_props[] = {
    /* Serial number string exposed through the device descriptors. */
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    /* Number of UTP transfer request slots; capped by ufs_check_constraints(). */
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    /* Number of UTP task management request slots; capped likewise. */
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration is not implemented for this device; mark it unmigratable. */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
1420 static void ufs_class_init(ObjectClass *oc, void *data)
1422 DeviceClass *dc = DEVICE_CLASS(oc);
1423 PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
1425 pc->realize = ufs_realize;
1426 pc->exit = ufs_exit;
1427 pc->vendor_id = PCI_VENDOR_ID_REDHAT;
1428 pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
1429 pc->class_id = PCI_CLASS_STORAGE_UFS;
1431 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1432 dc->desc = "Universal Flash Storage";
1433 device_class_set_props(dc, ufs_props);
1434 dc->vmsd = &ufs_vmstate;
1437 static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
1438 Error **errp)
1440 SCSIDevice *dev = SCSI_DEVICE(qdev);
1441 UfsBusClass *ubc = UFS_BUS_GET_CLASS(qbus);
1442 UfsHc *u = UFS(qbus->parent);
1444 if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_WLU) == 0) {
1445 if (dev->lun != UFS_UPIU_REPORT_LUNS_WLUN &&
1446 dev->lun != UFS_UPIU_UFS_DEVICE_WLUN &&
1447 dev->lun != UFS_UPIU_BOOT_WLUN && dev->lun != UFS_UPIU_RPMB_WLUN) {
1448 error_setg(errp, "bad well-known lun: %d", dev->lun);
1449 return false;
1452 if ((dev->lun == UFS_UPIU_REPORT_LUNS_WLUN && u->report_wlu != NULL) ||
1453 (dev->lun == UFS_UPIU_UFS_DEVICE_WLUN && u->dev_wlu != NULL) ||
1454 (dev->lun == UFS_UPIU_BOOT_WLUN && u->boot_wlu != NULL) ||
1455 (dev->lun == UFS_UPIU_RPMB_WLUN && u->rpmb_wlu != NULL)) {
1456 error_setg(errp, "well-known lun %d already exists", dev->lun);
1457 return false;
1460 return true;
1463 if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_LU) != 0) {
1464 error_setg(errp, "%s cannot be connected to ufs-bus",
1465 object_get_typename(OBJECT(dev)));
1466 return false;
1469 return ubc->parent_check_address(qbus, qdev, errp);
static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bc = BUS_CLASS(class);
    UfsBusClass *ubc = UFS_BUS_CLASS(class);

    /*
     * Save the parent (SCSI bus) check_address hook BEFORE overriding it,
     * so ufs_bus_check_address() can chain to it for normal ufs-lu devices.
     */
    ubc->parent_check_address = bc->check_address;
    bc->check_address = ufs_bus_check_address;
}
/* QOM type registration data for the UFS PCIe controller device. */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    /* Expose as a PCI Express device. */
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};
/* QOM type registration data for the UFS bus (a specialised SCSI bus). */
static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_SCSI_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};
/* Register both types with the QOM type system at module init time. */
static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)