3 #include "qemu/cutils.h"
5 typedef struct NvmeBar
{
29 CAP_MPSMIN_SHIFT
= 48,
30 CAP_MPSMAX_SHIFT
= 52,
34 CAP_MQES_MASK
= 0xffff,
41 CAP_MPSMIN_MASK
= 0xf,
42 CAP_MPSMAX_MASK
= 0xf,
/*
 * CAP (Controller Capabilities, offset 0x0) field extractors.
 * Each yields the raw field value shifted down to bit 0.
 */
#define NVME_CAP_MQES(cap)   (((cap) >> CAP_MQES_SHIFT)   & CAP_MQES_MASK)
#define NVME_CAP_CQR(cap)    (((cap) >> CAP_CQR_SHIFT)    & CAP_CQR_MASK)
#define NVME_CAP_AMS(cap)    (((cap) >> CAP_AMS_SHIFT)    & CAP_AMS_MASK)
#define NVME_CAP_TO(cap)     (((cap) >> CAP_TO_SHIFT)     & CAP_TO_MASK)
#define NVME_CAP_DSTRD(cap)  (((cap) >> CAP_DSTRD_SHIFT)  & CAP_DSTRD_MASK)
#define NVME_CAP_NSSRS(cap)  (((cap) >> CAP_NSSRS_SHIFT)  & CAP_NSSRS_MASK)
#define NVME_CAP_CSS(cap)    (((cap) >> CAP_CSS_SHIFT)    & CAP_CSS_MASK)
#define NVME_CAP_MPSMIN(cap) (((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK)
#define NVME_CAP_MPSMAX(cap) (((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK)
/*
 * CAP field setters: mask the value to the field width, shift it into
 * position and OR it into the register image.
 * NOTE(review): the continuation lines (`<< CAP_x_SHIFT)`) were lost in
 * extraction and are reconstructed from the uniform pattern; arguments
 * are parenthesized to avoid precedence surprises at expansion sites.
 */
#define NVME_CAP_SET_MQES(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_MQES_MASK) << CAP_MQES_SHIFT)
#define NVME_CAP_SET_CQR(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_CQR_MASK) << CAP_CQR_SHIFT)
#define NVME_CAP_SET_AMS(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_AMS_MASK) << CAP_AMS_SHIFT)
#define NVME_CAP_SET_TO(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_TO_MASK) << CAP_TO_SHIFT)
#define NVME_CAP_SET_DSTRD(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_DSTRD_MASK) << CAP_DSTRD_SHIFT)
#define NVME_CAP_SET_NSSRS(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_NSSRS_MASK) << CAP_NSSRS_SHIFT)
#define NVME_CAP_SET_CSS(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_CSS_MASK) << CAP_CSS_SHIFT)
#define NVME_CAP_SET_MPSMIN(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_MPSMIN_MASK) << CAP_MPSMIN_SHIFT)
#define NVME_CAP_SET_MPSMAX(cap, val) \
    ((cap) |= (uint64_t)((val) & CAP_MPSMAX_MASK) << CAP_MPSMAX_SHIFT)
/*
 * CC (Controller Configuration, offset 0x14) field extractors.
 * The macro argument is parenthesized (the original expanded `cc`
 * bare, which misparses for compound arguments such as `a | b`).
 */
#define NVME_CC_EN(cc)     (((cc) >> CC_EN_SHIFT)     & CC_EN_MASK)
#define NVME_CC_CSS(cc)    (((cc) >> CC_CSS_SHIFT)    & CC_CSS_MASK)
#define NVME_CC_MPS(cc)    (((cc) >> CC_MPS_SHIFT)    & CC_MPS_MASK)
#define NVME_CC_AMS(cc)    (((cc) >> CC_AMS_SHIFT)    & CC_AMS_MASK)
#define NVME_CC_SHN(cc)    (((cc) >> CC_SHN_SHIFT)    & CC_SHN_MASK)
#define NVME_CC_IOSQES(cc) (((cc) >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK)
#define NVME_CC_IOCQES(cc) (((cc) >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK)
106 CSTS_NSSRO_SHIFT
= 4,
112 CSTS_SHST_MASK
= 0x3,
113 CSTS_NSSRO_MASK
= 0x1,
117 NVME_CSTS_READY
= 1 << CSTS_RDY_SHIFT
,
118 NVME_CSTS_FAILED
= 1 << CSTS_CFS_SHIFT
,
119 NVME_CSTS_SHST_NORMAL
= 0 << CSTS_SHST_SHIFT
,
120 NVME_CSTS_SHST_PROGRESS
= 1 << CSTS_SHST_SHIFT
,
121 NVME_CSTS_SHST_COMPLETE
= 2 << CSTS_SHST_SHIFT
,
122 NVME_CSTS_NSSRO
= 1 << CSTS_NSSRO_SHIFT
,
/*
 * CSTS (Controller Status, offset 0x1c) field extractors.
 * Argument parenthesized for macro hygiene.
 */
#define NVME_CSTS_RDY(csts)   (((csts) >> CSTS_RDY_SHIFT)   & CSTS_RDY_MASK)
#define NVME_CSTS_CFS(csts)   (((csts) >> CSTS_CFS_SHIFT)   & CSTS_CFS_MASK)
#define NVME_CSTS_SHST(csts)  (((csts) >> CSTS_SHST_SHIFT)  & CSTS_SHST_MASK)
#define NVME_CSTS_NSSRO(csts) (((csts) >> CSTS_NSSRO_SHIFT) & CSTS_NSSRO_MASK)
136 AQA_ASQS_MASK
= 0xfff,
137 AQA_ACQS_MASK
= 0xfff,
/* AQA (Admin Queue Attributes): admin SQ/CQ sizes, 0's-based, 12 bits each. */
#define NVME_AQA_ASQS(aqa) (((aqa) >> AQA_ASQS_SHIFT) & AQA_ASQS_MASK)
#define NVME_AQA_ACQS(aqa) (((aqa) >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK)
/* CMBLOC register bit-field shift amounts. */
enum NvmeCmblocShift {
    CMBLOC_BIR_SHIFT  = 0,
    CMBLOC_OFST_SHIFT = 12,
};
/* CMBLOC register bit-field masks (applied after shifting down). */
enum NvmeCmblocMask {
    CMBLOC_BIR_MASK  = 0x7,
    CMBLOC_OFST_MASK = 0xfffff,
};
/*
 * CMBLOC (Controller Memory Buffer Location) accessors.
 * NOTE(review): the getters' continuation lines (the mask names) were
 * lost in extraction and are reconstructed from the uniform pattern.
 */
#define NVME_CMBLOC_BIR(cmbloc)  (((cmbloc) >> CMBLOC_BIR_SHIFT) & \
                                  CMBLOC_BIR_MASK)
#define NVME_CMBLOC_OFST(cmbloc) (((cmbloc) >> CMBLOC_OFST_SHIFT) & \
                                  CMBLOC_OFST_MASK)

#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
    ((cmbloc) |= (uint64_t)((val) & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT)
#define NVME_CMBLOC_SET_OFST(cmbloc, val) \
    ((cmbloc) |= (uint64_t)((val) & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT)
163 enum NvmeCmbszShift
{
166 CMBSZ_LISTS_SHIFT
= 2,
174 CMBSZ_SQS_MASK
= 0x1,
175 CMBSZ_CQS_MASK
= 0x1,
176 CMBSZ_LISTS_MASK
= 0x1,
177 CMBSZ_RDS_MASK
= 0x1,
178 CMBSZ_WDS_MASK
= 0x1,
179 CMBSZ_SZU_MASK
= 0xf,
180 CMBSZ_SZ_MASK
= 0xfffff,
/* CMBSZ (Controller Memory Buffer Size) field extractors. */
#define NVME_CMBSZ_SQS(cmbsz)  (((cmbsz) >> CMBSZ_SQS_SHIFT)  & CMBSZ_SQS_MASK)
#define NVME_CMBSZ_CQS(cmbsz)  (((cmbsz) >> CMBSZ_CQS_SHIFT)  & CMBSZ_CQS_MASK)
#define NVME_CMBSZ_LISTS(cmbsz) \
    (((cmbsz) >> CMBSZ_LISTS_SHIFT) & CMBSZ_LISTS_MASK)
#define NVME_CMBSZ_RDS(cmbsz)  (((cmbsz) >> CMBSZ_RDS_SHIFT)  & CMBSZ_RDS_MASK)
#define NVME_CMBSZ_WDS(cmbsz)  (((cmbsz) >> CMBSZ_WDS_SHIFT)  & CMBSZ_WDS_MASK)
#define NVME_CMBSZ_SZU(cmbsz)  (((cmbsz) >> CMBSZ_SZU_SHIFT)  & CMBSZ_SZU_MASK)
#define NVME_CMBSZ_SZ(cmbsz)   (((cmbsz) >> CMBSZ_SZ_SHIFT)   & CMBSZ_SZ_MASK)

/* CMBSZ field setters: mask the value, shift into position, OR it in. */
#define NVME_CMBSZ_SET_SQS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SQS_MASK) << CMBSZ_SQS_SHIFT)
#define NVME_CMBSZ_SET_CQS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_CQS_MASK) << CMBSZ_CQS_SHIFT)
#define NVME_CMBSZ_SET_LISTS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_LISTS_MASK) << CMBSZ_LISTS_SHIFT)
#define NVME_CMBSZ_SET_RDS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_RDS_MASK) << CMBSZ_RDS_SHIFT)
#define NVME_CMBSZ_SET_WDS(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_WDS_MASK) << CMBSZ_WDS_SHIFT)
#define NVME_CMBSZ_SET_SZU(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SZU_MASK) << CMBSZ_SZU_SHIFT)
#define NVME_CMBSZ_SET_SZ(cmbsz, val) \
    ((cmbsz) |= (uint64_t)((val) & CMBSZ_SZ_MASK) << CMBSZ_SZ_SHIFT)

/*
 * Total CMB size in bytes: SZ units of (1 << (12 + 4 * SZU)) bytes each.
 * The unit is computed in uint64_t: the original int literal `1` made
 * shifts of 32+ bits (SZU >= 5, a spec-legal value) undefined behavior.
 */
#define NVME_CMBSZ_GETSIZE(cmbsz) \
    (NVME_CMBSZ_SZ(cmbsz) * ((uint64_t)1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))
209 typedef struct NvmeCmd
{
/* Admin command set opcodes (NVMe spec, Admin Command Set). */
enum NvmeAdminCommands {
    NVME_ADM_CMD_DELETE_SQ      = 0x00,
    NVME_ADM_CMD_CREATE_SQ      = 0x01,
    NVME_ADM_CMD_GET_LOG_PAGE   = 0x02,
    NVME_ADM_CMD_DELETE_CQ      = 0x04,
    NVME_ADM_CMD_CREATE_CQ      = 0x05,
    NVME_ADM_CMD_IDENTIFY       = 0x06,
    NVME_ADM_CMD_ABORT          = 0x08,
    NVME_ADM_CMD_SET_FEATURES   = 0x09,
    NVME_ADM_CMD_GET_FEATURES   = 0x0a,
    NVME_ADM_CMD_ASYNC_EV_REQ   = 0x0c,
    NVME_ADM_CMD_ACTIVATE_FW    = 0x10,
    NVME_ADM_CMD_DOWNLOAD_FW    = 0x11,
    NVME_ADM_CMD_FORMAT_NVM     = 0x80,  /* 0x80+ are NVM-command-set specific */
    NVME_ADM_CMD_SECURITY_SEND  = 0x81,
    NVME_ADM_CMD_SECURITY_RECV  = 0x82,
};
244 enum NvmeIoCommands
{
245 NVME_CMD_FLUSH
= 0x00,
246 NVME_CMD_WRITE
= 0x01,
247 NVME_CMD_READ
= 0x02,
248 NVME_CMD_WRITE_UNCOR
= 0x04,
249 NVME_CMD_COMPARE
= 0x05,
250 NVME_CMD_WRITE_ZEROS
= 0x08,
254 typedef struct NvmeDeleteQ
{
264 typedef struct NvmeCreateCq
{
/* Create-CQ flags: bit 0 = physically contiguous, bit 1 = interrupts on. */
#define NVME_CQ_FLAGS_PC(cq_flags)  ((cq_flags) & 0x1)
#define NVME_CQ_FLAGS_IEN(cq_flags) (((cq_flags) >> 1) & 0x1)
281 typedef struct NvmeCreateSq
{
/* Create-SQ flags: bit 0 = physically contiguous, bits 2:1 = queue priority. */
#define NVME_SQ_FLAGS_PC(sq_flags)    ((sq_flags) & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags) (((sq_flags) >> 1) & 0x3)
298 enum NvmeQueueFlags
{
300 NVME_Q_PRIO_URGENT
= 0,
301 NVME_Q_PRIO_HIGH
= 1,
302 NVME_Q_PRIO_NORMAL
= 2,
306 typedef struct NvmeIdentify
{
318 typedef struct NvmeRwCmd
{
337 NVME_RW_LR
= 1 << 15,
338 NVME_RW_FUA
= 1 << 14,
339 NVME_RW_DSM_FREQ_UNSPEC
= 0,
340 NVME_RW_DSM_FREQ_TYPICAL
= 1,
341 NVME_RW_DSM_FREQ_RARE
= 2,
342 NVME_RW_DSM_FREQ_READS
= 3,
343 NVME_RW_DSM_FREQ_WRITES
= 4,
344 NVME_RW_DSM_FREQ_RW
= 5,
345 NVME_RW_DSM_FREQ_ONCE
= 6,
346 NVME_RW_DSM_FREQ_PREFETCH
= 7,
347 NVME_RW_DSM_FREQ_TEMP
= 8,
348 NVME_RW_DSM_LATENCY_NONE
= 0 << 4,
349 NVME_RW_DSM_LATENCY_IDLE
= 1 << 4,
350 NVME_RW_DSM_LATENCY_NORM
= 2 << 4,
351 NVME_RW_DSM_LATENCY_LOW
= 3 << 4,
352 NVME_RW_DSM_SEQ_REQ
= 1 << 6,
353 NVME_RW_DSM_COMPRESSED
= 1 << 7,
354 NVME_RW_PRINFO_PRACT
= 1 << 13,
355 NVME_RW_PRINFO_PRCHK_GUARD
= 1 << 12,
356 NVME_RW_PRINFO_PRCHK_APP
= 1 << 11,
357 NVME_RW_PRINFO_PRCHK_REF
= 1 << 10,
360 typedef struct NvmeDsmCmd
{
374 NVME_DSMGMT_IDR
= 1 << 0,
375 NVME_DSMGMT_IDW
= 1 << 1,
376 NVME_DSMGMT_AD
= 1 << 2,
379 typedef struct NvmeDsmRange
{
/*
 * Asynchronous Event Request result encodings: event types plus the
 * per-type information codes (error vs. SMART/health).
 */
enum NvmeAsyncEventRequest {
    NVME_AER_TYPE_ERROR                     = 0,
    NVME_AER_TYPE_SMART                     = 1,
    NVME_AER_TYPE_IO_SPECIFIC               = 6,
    NVME_AER_TYPE_VENDOR_SPECIFIC           = 7,
    NVME_AER_INFO_ERR_INVALID_SQ            = 0,
    NVME_AER_INFO_ERR_INVALID_DB            = 1,
    NVME_AER_INFO_ERR_DIAG_FAIL             = 2,
    NVME_AER_INFO_ERR_PERS_INTERNAL_ERR     = 3,
    NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR    = 4,
    NVME_AER_INFO_ERR_FW_IMG_LOAD_ERR       = 5,
    NVME_AER_INFO_SMART_RELIABILITY         = 0,
    NVME_AER_INFO_SMART_TEMP_THRESH         = 1,
    NVME_AER_INFO_SMART_SPARE_THRESH        = 2,
};
401 typedef struct NvmeAerResult
{
408 typedef struct NvmeCqe
{
417 enum NvmeStatusCodes
{
418 NVME_SUCCESS
= 0x0000,
419 NVME_INVALID_OPCODE
= 0x0001,
420 NVME_INVALID_FIELD
= 0x0002,
421 NVME_CID_CONFLICT
= 0x0003,
422 NVME_DATA_TRAS_ERROR
= 0x0004,
423 NVME_POWER_LOSS_ABORT
= 0x0005,
424 NVME_INTERNAL_DEV_ERROR
= 0x0006,
425 NVME_CMD_ABORT_REQ
= 0x0007,
426 NVME_CMD_ABORT_SQ_DEL
= 0x0008,
427 NVME_CMD_ABORT_FAILED_FUSE
= 0x0009,
428 NVME_CMD_ABORT_MISSING_FUSE
= 0x000a,
429 NVME_INVALID_NSID
= 0x000b,
430 NVME_CMD_SEQ_ERROR
= 0x000c,
431 NVME_LBA_RANGE
= 0x0080,
432 NVME_CAP_EXCEEDED
= 0x0081,
433 NVME_NS_NOT_READY
= 0x0082,
434 NVME_NS_RESV_CONFLICT
= 0x0083,
435 NVME_INVALID_CQID
= 0x0100,
436 NVME_INVALID_QID
= 0x0101,
437 NVME_MAX_QSIZE_EXCEEDED
= 0x0102,
438 NVME_ACL_EXCEEDED
= 0x0103,
439 NVME_RESERVED
= 0x0104,
440 NVME_AER_LIMIT_EXCEEDED
= 0x0105,
441 NVME_INVALID_FW_SLOT
= 0x0106,
442 NVME_INVALID_FW_IMAGE
= 0x0107,
443 NVME_INVALID_IRQ_VECTOR
= 0x0108,
444 NVME_INVALID_LOG_ID
= 0x0109,
445 NVME_INVALID_FORMAT
= 0x010a,
446 NVME_FW_REQ_RESET
= 0x010b,
447 NVME_INVALID_QUEUE_DEL
= 0x010c,
448 NVME_FID_NOT_SAVEABLE
= 0x010d,
449 NVME_FID_NOT_NSID_SPEC
= 0x010f,
450 NVME_FW_REQ_SUSYSTEM_RESET
= 0x0110,
451 NVME_CONFLICTING_ATTRS
= 0x0180,
452 NVME_INVALID_PROT_INFO
= 0x0181,
453 NVME_WRITE_TO_RO
= 0x0182,
454 NVME_WRITE_FAULT
= 0x0280,
455 NVME_UNRECOVERED_READ
= 0x0281,
456 NVME_E2E_GUARD_ERROR
= 0x0282,
457 NVME_E2E_APP_ERROR
= 0x0283,
458 NVME_E2E_REF_ERROR
= 0x0284,
459 NVME_CMP_FAILURE
= 0x0285,
460 NVME_ACCESS_DENIED
= 0x0286,
463 NVME_NO_COMPLETE
= 0xffff,
466 typedef struct NvmeFwSlotInfoLog
{
468 uint8_t reserved1
[7];
476 uint8_t reserved2
[448];
479 typedef struct NvmeErrorLog
{
480 uint64_t error_count
;
483 uint16_t status_field
;
484 uint16_t param_error_location
;
491 typedef struct NvmeSmartLog
{
492 uint8_t critical_warning
;
493 uint8_t temperature
[2];
494 uint8_t available_spare
;
495 uint8_t available_spare_threshold
;
496 uint8_t percentage_used
;
497 uint8_t reserved1
[26];
498 uint64_t data_units_read
[2];
499 uint64_t data_units_written
[2];
500 uint64_t host_read_commands
[2];
501 uint64_t host_write_commands
[2];
502 uint64_t controller_busy_time
[2];
503 uint64_t power_cycles
[2];
504 uint64_t power_on_hours
[2];
505 uint64_t unsafe_shutdowns
[2];
506 uint64_t media_errors
[2];
507 uint64_t number_of_error_log_entries
[2];
508 uint8_t reserved2
[320];
512 NVME_SMART_SPARE
= 1 << 0,
513 NVME_SMART_TEMPERATURE
= 1 << 1,
514 NVME_SMART_RELIABILITY
= 1 << 2,
515 NVME_SMART_MEDIA_READ_ONLY
= 1 << 3,
516 NVME_SMART_FAILED_VOLATILE_MEDIA
= 1 << 4,
520 NVME_LOG_ERROR_INFO
= 0x01,
521 NVME_LOG_SMART_INFO
= 0x02,
522 NVME_LOG_FW_SLOT_INFO
= 0x03,
525 typedef struct NvmePSD
{
537 typedef struct NvmeIdCtrl
{
547 uint8_t rsvd255
[178];
555 uint8_t rsvd511
[248];
566 uint8_t rsvd703
[174];
567 uint8_t rsvd2047
[1344];
/* Identify Controller OACS: optional admin command support bit-flags. */
enum NvmeIdCtrlOacs {
    NVME_OACS_SECURITY  = 1 << 0,
    NVME_OACS_FORMAT    = 1 << 1,
    NVME_OACS_FW        = 1 << 2,
};
/* Identify Controller ONCS: optional NVM command support bit-flags. */
enum NvmeIdCtrlOncs {
    NVME_ONCS_COMPARE       = 1 << 0,
    NVME_ONCS_WRITE_UNCORR  = 1 << 1,
    NVME_ONCS_DSM           = 1 << 2,
    NVME_ONCS_WRITE_ZEROS   = 1 << 3,
    NVME_ONCS_FEATURES      = 1 << 4,
    NVME_ONCS_RESRVATIONS   = 1 << 5,
};
/*
 * SQES/CQES (Identify Controller): required (min) and maximum queue
 * entry sizes, each a log2 value packed into one nibble.
 */
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)
592 typedef struct NvmeFeatureVal
{
593 uint32_t arbitration
;
595 uint32_t temp_thresh
;
597 uint32_t volatile_wc
;
599 uint32_t int_coalescing
;
600 uint32_t *int_vector_config
;
601 uint32_t write_atomicity
;
602 uint32_t async_config
;
603 uint32_t sw_prog_marker
;
/* Arbitration feature fields: burst plus low/medium/high priority weights. */
#define NVME_ARB_AB(arb)  ((arb) & 0x7)
#define NVME_ARB_LPW(arb) (((arb) >> 8)  & 0xff)
#define NVME_ARB_MPW(arb) (((arb) >> 16) & 0xff)
#define NVME_ARB_HPW(arb) (((arb) >> 24) & 0xff)

/* Interrupt coalescing feature fields: aggregation threshold and time. */
#define NVME_INTC_THR(intc)  ((intc) & 0xff)
#define NVME_INTC_TIME(intc) (((intc) >> 8) & 0xff)
/* Feature identifiers for the Set/Get Features admin commands. */
enum NvmeFeatureIds {
    NVME_ARBITRATION                = 0x1,
    NVME_POWER_MANAGEMENT           = 0x2,
    NVME_LBA_RANGE_TYPE             = 0x3,
    NVME_TEMPERATURE_THRESHOLD      = 0x4,
    NVME_ERROR_RECOVERY             = 0x5,
    NVME_VOLATILE_WRITE_CACHE       = 0x6,
    NVME_NUMBER_OF_QUEUES           = 0x7,
    NVME_INTERRUPT_COALESCING       = 0x8,
    NVME_INTERRUPT_VECTOR_CONF      = 0x9,
    NVME_WRITE_ATOMICITY            = 0xa,
    NVME_ASYNCHRONOUS_EVENT_CONF    = 0xb,
    NVME_SOFTWARE_PROGRESS_MARKER   = 0x80  /* optional, command-set specific */
};
629 typedef struct NvmeRangeType
{
639 typedef struct NvmeLBAF
{
645 typedef struct NvmeIdNs
{
/* Identify Namespace field extractors (NSFEAT/FLBAS/MC/DPC bytes). */
#define NVME_ID_NS_NSFEAT_THIN(nsfeat)   ((nsfeat) & 0x1)
#define NVME_ID_NS_FLBAS_EXTENDED(flbas) (((flbas) >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas)    ((flbas) & 0xf)
#define NVME_ID_NS_MC_SEPARATE(mc)       (((mc) >> 1) & 0x1)
#define NVME_ID_NS_MC_EXTENDED(mc)       ((mc) & 0x1)
#define NVME_ID_NS_DPC_LAST_EIGHT(dpc)   (((dpc) >> 4) & 0x1)
#define NVME_ID_NS_DPC_FIRST_EIGHT(dpc)  (((dpc) >> 3) & 0x1)
#define NVME_ID_NS_DPC_TYPE_3(dpc)       (((dpc) >> 2) & 0x1)
#define NVME_ID_NS_DPC_TYPE_2(dpc)       (((dpc) >> 1) & 0x1)
#define NVME_ID_NS_DPC_TYPE_1(dpc)       ((dpc) & 0x1)
#define NVME_ID_NS_DPC_TYPE_MASK         0x7
682 static inline void _nvme_check_size(void)
684 QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult
) != 4);
685 QEMU_BUILD_BUG_ON(sizeof(NvmeCqe
) != 16);
686 QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange
) != 16);
687 QEMU_BUILD_BUG_ON(sizeof(NvmeCmd
) != 64);
688 QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ
) != 64);
689 QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq
) != 64);
690 QEMU_BUILD_BUG_ON(sizeof(NvmeCreateSq
) != 64);
691 QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify
) != 64);
692 QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd
) != 64);
693 QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd
) != 64);
694 QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType
) != 64);
695 QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog
) != 64);
696 QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog
) != 512);
697 QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog
) != 512);
698 QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl
) != 4096);
699 QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs
) != 4096);
702 typedef struct NvmeAsyncEvent
{
703 QSIMPLEQ_ENTRY(NvmeAsyncEvent
) entry
;
704 NvmeAerResult result
;
707 typedef struct NvmeRequest
{
708 struct NvmeSQueue
*sq
;
713 BlockAcctCookie acct
;
716 QTAILQ_ENTRY(NvmeRequest
)entry
;
719 typedef struct NvmeSQueue
{
720 struct NvmeCtrl
*ctrl
;
729 QTAILQ_HEAD(sq_req_list
, NvmeRequest
) req_list
;
730 QTAILQ_HEAD(out_req_list
, NvmeRequest
) out_req_list
;
731 QTAILQ_ENTRY(NvmeSQueue
) entry
;
734 typedef struct NvmeCQueue
{
735 struct NvmeCtrl
*ctrl
;
738 uint16_t irq_enabled
;
745 QTAILQ_HEAD(sq_list
, NvmeSQueue
) sq_list
;
746 QTAILQ_HEAD(cq_req_list
, NvmeRequest
) req_list
;
749 typedef struct NvmeNamespace
{
/* QOM type name for the NVMe PCI controller device. */
#define TYPE_NVME "nvme"
755 OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
757 typedef struct NvmeCtrl
{
758 PCIDevice parent_obj
;
760 MemoryRegion ctrl_mem
;
766 uint16_t max_prp_ents
;
770 uint32_t num_namespaces
;
774 uint32_t cmb_size_mb
;
781 NvmeNamespace
*namespaces
;
789 #endif /* HW_NVME_H */