typedef struct NvmeBar {
    CAP_MPSMIN_SHIFT = 48,
    CAP_MPSMAX_SHIFT = 52,

    CAP_MQES_MASK    = 0xffff,

    CAP_MPSMIN_MASK  = 0xf,
    CAP_MPSMAX_MASK  = 0xf,
#define NVME_CAP_MQES(cap)   (((cap) >> CAP_MQES_SHIFT)   & CAP_MQES_MASK)
#define NVME_CAP_CQR(cap)    (((cap) >> CAP_CQR_SHIFT)    & CAP_CQR_MASK)
#define NVME_CAP_AMS(cap)    (((cap) >> CAP_AMS_SHIFT)    & CAP_AMS_MASK)
#define NVME_CAP_TO(cap)     (((cap) >> CAP_TO_SHIFT)     & CAP_TO_MASK)
#define NVME_CAP_DSTRD(cap)  (((cap) >> CAP_DSTRD_SHIFT)  & CAP_DSTRD_MASK)
#define NVME_CAP_NSSRS(cap)  (((cap) >> CAP_NSSRS_SHIFT)  & CAP_NSSRS_MASK)
#define NVME_CAP_CSS(cap)    (((cap) >> CAP_CSS_SHIFT)    & CAP_CSS_MASK)
#define NVME_CAP_MPSMIN(cap) (((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK)
#define NVME_CAP_MPSMAX(cap) (((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK)
#define NVME_CAP_SET_MQES(cap, val)   (cap |= (uint64_t)(val & CAP_MQES_MASK)   \
                                                            << CAP_MQES_SHIFT)
#define NVME_CAP_SET_CQR(cap, val)    (cap |= (uint64_t)(val & CAP_CQR_MASK)    \
                                                            << CAP_CQR_SHIFT)
#define NVME_CAP_SET_AMS(cap, val)    (cap |= (uint64_t)(val & CAP_AMS_MASK)    \
                                                            << CAP_AMS_SHIFT)
#define NVME_CAP_SET_TO(cap, val)     (cap |= (uint64_t)(val & CAP_TO_MASK)     \
                                                            << CAP_TO_SHIFT)
#define NVME_CAP_SET_DSTRD(cap, val)  (cap |= (uint64_t)(val & CAP_DSTRD_MASK)  \
                                                            << CAP_DSTRD_SHIFT)
#define NVME_CAP_SET_NSSRS(cap, val)  (cap |= (uint64_t)(val & CAP_NSSRS_MASK)  \
                                                            << CAP_NSSRS_SHIFT)
#define NVME_CAP_SET_CSS(cap, val)    (cap |= (uint64_t)(val & CAP_CSS_MASK)    \
                                                            << CAP_CSS_SHIFT)
#define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK) \
                                                            << CAP_MPSMIN_SHIFT)
#define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK) \
                                                            << CAP_MPSMAX_SHIFT)
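
/*
 * Illustrative sketch, not part of the original header: how a controller
 * model might compose a CAP value with the SET_* helpers above and how a
 * reader pulls the fields back out with the getters.  The chosen values
 * and the helper name nvme_cap_example() are hypothetical.
 */
static inline uint64_t nvme_cap_example(void)
{
    uint64_t cap = 0;

    NVME_CAP_SET_MQES(cap, 0x7ff);  /* queues of up to 2048 entries (0's based) */
    NVME_CAP_SET_CQR(cap, 1);       /* physically contiguous queues required */
    NVME_CAP_SET_TO(cap, 0xf);      /* ready timeout, in 500 ms units */
    NVME_CAP_SET_CSS(cap, 1);       /* NVM command set supported */
    NVME_CAP_SET_MPSMIN(cap, 0);    /* minimum page size 2^(12 + 0) = 4 KiB */
    NVME_CAP_SET_MPSMAX(cap, 0);    /* maximum page size also 4 KiB */

    /* Round-trip check: the getters undo the setters. */
    return NVME_CAP_MQES(cap) == 0x7ff ? cap : 0;
}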
#define NVME_CC_EN(cc)     ((cc >> CC_EN_SHIFT)     & CC_EN_MASK)
#define NVME_CC_CSS(cc)    ((cc >> CC_CSS_SHIFT)    & CC_CSS_MASK)
#define NVME_CC_MPS(cc)    ((cc >> CC_MPS_SHIFT)    & CC_MPS_MASK)
#define NVME_CC_AMS(cc)    ((cc >> CC_AMS_SHIFT)    & CC_AMS_MASK)
#define NVME_CC_SHN(cc)    ((cc >> CC_SHN_SHIFT)    & CC_SHN_MASK)
#define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK)
#define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK)
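
/*
 * Illustrative sketch, not part of the original header: how a CC write is
 * typically decoded.  CC.MPS selects the host memory page size as
 * 2^(12 + MPS), and CC.IOSQES/IOCQES give the I/O queue entry sizes as
 * powers of two.  The local variable names are hypothetical.
 */
static inline void nvme_cc_decode_example(uint32_t cc)
{
    uint32_t page_size = 1u << (12 + NVME_CC_MPS(cc)); /* e.g. MPS = 0 -> 4 KiB */
    uint32_t sqe_size  = 1u << NVME_CC_IOSQES(cc);     /* e.g. 6 -> 64 bytes    */
    uint32_t cqe_size  = 1u << NVME_CC_IOCQES(cc);     /* e.g. 4 -> 16 bytes    */

    (void)page_size;
    (void)sqe_size;
    (void)cqe_size;

    if (NVME_CC_EN(cc)) {
        /* enable transition: a controller would start command processing here */
    }
}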
    CSTS_NSSRO_SHIFT = 4,

    CSTS_SHST_MASK   = 0x3,
    CSTS_NSSRO_MASK  = 0x1,
    NVME_CSTS_READY         = 1 << CSTS_RDY_SHIFT,
    NVME_CSTS_FAILED        = 1 << CSTS_CFS_SHIFT,
    NVME_CSTS_SHST_NORMAL   = 0 << CSTS_SHST_SHIFT,
    NVME_CSTS_SHST_PROGRESS = 1 << CSTS_SHST_SHIFT,
    NVME_CSTS_SHST_COMPLETE = 2 << CSTS_SHST_SHIFT,
    NVME_CSTS_NSSRO         = 1 << CSTS_NSSRO_SHIFT,
#define NVME_CSTS_RDY(csts)   ((csts >> CSTS_RDY_SHIFT)   & CSTS_RDY_MASK)
#define NVME_CSTS_CFS(csts)   ((csts >> CSTS_CFS_SHIFT)   & CSTS_CFS_MASK)
#define NVME_CSTS_SHST(csts)  ((csts >> CSTS_SHST_SHIFT)  & CSTS_SHST_MASK)
#define NVME_CSTS_NSSRO(csts) ((csts >> CSTS_NSSRO_SHIFT) & CSTS_NSSRO_MASK)
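
/*
 * Illustrative sketch, not part of the original header: the usual CSTS
 * checks a host makes after enabling the controller or requesting a
 * shutdown.  nvme_csts_example() is a hypothetical helper name; it
 * returns non-zero when the controller is usable.
 */
static inline int nvme_csts_example(uint32_t csts)
{
    if (NVME_CSTS_CFS(csts)) {
        return 0;                   /* controller fatal status */
    }
    /* SHST field value 2 corresponds to NVME_CSTS_SHST_COMPLETE */
    if (NVME_CSTS_SHST(csts) == (NVME_CSTS_SHST_COMPLETE >> CSTS_SHST_SHIFT)) {
        return 0;                   /* shutdown processing complete */
    }
    return NVME_CSTS_RDY(csts);     /* ready to accept commands */
}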
    AQA_ASQS_MASK = 0xfff,
    AQA_ACQS_MASK = 0xfff,

#define NVME_AQA_ASQS(aqa) ((aqa >> AQA_ASQS_SHIFT) & AQA_ASQS_MASK)
#define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK)
enum NvmeCmblocShift {
    CMBLOC_BIR_SHIFT  = 0,
    CMBLOC_OFST_SHIFT = 12,
};

enum NvmeCmblocMask {
    CMBLOC_BIR_MASK   = 0x7,
    CMBLOC_OFST_MASK  = 0xfffff,
};
#define NVME_CMBLOC_BIR(cmbloc)  ((cmbloc >> CMBLOC_BIR_SHIFT)  & \
                                  CMBLOC_BIR_MASK)
#define NVME_CMBLOC_OFST(cmbloc) ((cmbloc >> CMBLOC_OFST_SHIFT) & \
                                  CMBLOC_OFST_MASK)

#define NVME_CMBLOC_SET_BIR(cmbloc, val) \
    (cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT)
#define NVME_CMBLOC_SET_OFST(cmbloc, val) \
    (cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT)
enum NvmeCmbszShift {
    CMBSZ_LISTS_SHIFT = 2,

    CMBSZ_SQS_MASK    = 0x1,
    CMBSZ_CQS_MASK    = 0x1,
    CMBSZ_LISTS_MASK  = 0x1,
    CMBSZ_RDS_MASK    = 0x1,
    CMBSZ_WDS_MASK    = 0x1,
    CMBSZ_SZU_MASK    = 0xf,
    CMBSZ_SZ_MASK     = 0xfffff,
#define NVME_CMBSZ_SQS(cmbsz)   ((cmbsz >> CMBSZ_SQS_SHIFT)   & CMBSZ_SQS_MASK)
#define NVME_CMBSZ_CQS(cmbsz)   ((cmbsz >> CMBSZ_CQS_SHIFT)   & CMBSZ_CQS_MASK)
#define NVME_CMBSZ_LISTS(cmbsz) ((cmbsz >> CMBSZ_LISTS_SHIFT) & CMBSZ_LISTS_MASK)
#define NVME_CMBSZ_RDS(cmbsz)   ((cmbsz >> CMBSZ_RDS_SHIFT)   & CMBSZ_RDS_MASK)
#define NVME_CMBSZ_WDS(cmbsz)   ((cmbsz >> CMBSZ_WDS_SHIFT)   & CMBSZ_WDS_MASK)
#define NVME_CMBSZ_SZU(cmbsz)   ((cmbsz >> CMBSZ_SZU_SHIFT)   & CMBSZ_SZU_MASK)
#define NVME_CMBSZ_SZ(cmbsz)    ((cmbsz >> CMBSZ_SZ_SHIFT)    & CMBSZ_SZ_MASK)

#define NVME_CMBSZ_SET_SQS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_SQS_MASK) << CMBSZ_SQS_SHIFT)
#define NVME_CMBSZ_SET_CQS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_CQS_MASK) << CMBSZ_CQS_SHIFT)
#define NVME_CMBSZ_SET_LISTS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_LISTS_MASK) << CMBSZ_LISTS_SHIFT)
#define NVME_CMBSZ_SET_RDS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_RDS_MASK) << CMBSZ_RDS_SHIFT)
#define NVME_CMBSZ_SET_WDS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_WDS_MASK) << CMBSZ_WDS_SHIFT)
#define NVME_CMBSZ_SET_SZU(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_SZU_MASK) << CMBSZ_SZU_SHIFT)
#define NVME_CMBSZ_SET_SZ(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_SZ_MASK) << CMBSZ_SZ_SHIFT)
#define NVME_CMBSZ_GETSIZE(cmbsz) \
    (NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))
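
/*
 * Worked example (illustrative, not part of the original header): SZU
 * encodes the size unit as 2^(12 + 4 * SZU), so SZU = 2 means 1 MiB units;
 * with SZ = 16, NVME_CMBSZ_GETSIZE() above yields 16 * 1 MiB = 16 MiB for
 * the controller memory buffer.
 */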
typedef struct NvmeCmd {
enum NvmeAdminCommands {
    NVME_ADM_CMD_DELETE_SQ      = 0x00,
    NVME_ADM_CMD_CREATE_SQ      = 0x01,
    NVME_ADM_CMD_GET_LOG_PAGE   = 0x02,
    NVME_ADM_CMD_DELETE_CQ      = 0x04,
    NVME_ADM_CMD_CREATE_CQ      = 0x05,
    NVME_ADM_CMD_IDENTIFY       = 0x06,
    NVME_ADM_CMD_ABORT          = 0x08,
    NVME_ADM_CMD_SET_FEATURES   = 0x09,
    NVME_ADM_CMD_GET_FEATURES   = 0x0a,
    NVME_ADM_CMD_ASYNC_EV_REQ   = 0x0c,
    NVME_ADM_CMD_ACTIVATE_FW    = 0x10,
    NVME_ADM_CMD_DOWNLOAD_FW    = 0x11,
    NVME_ADM_CMD_FORMAT_NVM     = 0x80,
    NVME_ADM_CMD_SECURITY_SEND  = 0x81,
    NVME_ADM_CMD_SECURITY_RECV  = 0x82,
};
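
/*
 * Illustrative sketch, not part of the original header: the shape an
 * admin opcode check usually takes in a controller model.  The helper
 * name is hypothetical; unknown opcodes would normally complete with
 * an Invalid Command Opcode status.
 */
static inline int nvme_admin_opcode_known_example(uint8_t opcode)
{
    switch (opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
    case NVME_ADM_CMD_CREATE_SQ:
    case NVME_ADM_CMD_GET_LOG_PAGE:
    case NVME_ADM_CMD_DELETE_CQ:
    case NVME_ADM_CMD_CREATE_CQ:
    case NVME_ADM_CMD_IDENTIFY:
    case NVME_ADM_CMD_ABORT:
    case NVME_ADM_CMD_SET_FEATURES:
    case NVME_ADM_CMD_GET_FEATURES:
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return 1;
    default:
        return 0;
    }
}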
enum NvmeIoCommands {
    NVME_CMD_FLUSH        = 0x00,
    NVME_CMD_WRITE        = 0x01,
    NVME_CMD_READ         = 0x02,
    NVME_CMD_WRITE_UNCOR  = 0x04,
    NVME_CMD_COMPARE      = 0x05,
    NVME_CMD_WRITE_ZEROS  = 0x08,
typedef struct NvmeDeleteQ {

typedef struct NvmeCreateCq {

#define NVME_CQ_FLAGS_PC(cq_flags)  (cq_flags & 0x1)
#define NVME_CQ_FLAGS_IEN(cq_flags) ((cq_flags >> 1) & 0x1)

typedef struct NvmeCreateSq {

#define NVME_SQ_FLAGS_PC(sq_flags)    (sq_flags & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags) ((sq_flags >> 1) & 0x3)
enum NvmeQueueFlags {
    NVME_Q_PRIO_URGENT = 0,
    NVME_Q_PRIO_HIGH   = 1,
    NVME_Q_PRIO_NORMAL = 2,
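
/*
 * Illustrative sketch, not part of the original header: decoding the flags
 * of a Create I/O Submission Queue command with the macros above.  Bit 0 is
 * the "physically contiguous" flag and bits 2:1 carry the queue priority,
 * which only matters under weighted round robin arbitration.  The helper
 * name is hypothetical.
 */
static inline void nvme_sq_flags_example(uint16_t sq_flags)
{
    int contiguous = NVME_SQ_FLAGS_PC(sq_flags);
    int prio       = NVME_SQ_FLAGS_QPRIO(sq_flags);

    (void)contiguous;

    if (prio == NVME_Q_PRIO_URGENT) {
        /* urgent queues would be serviced first under WRR arbitration */
    }
}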
typedef struct NvmeIdentify {

typedef struct NvmeRwCmd {
    NVME_RW_LR                  = 1 << 15,
    NVME_RW_FUA                 = 1 << 14,
    NVME_RW_DSM_FREQ_UNSPEC     = 0,
    NVME_RW_DSM_FREQ_TYPICAL    = 1,
    NVME_RW_DSM_FREQ_RARE       = 2,
    NVME_RW_DSM_FREQ_READS      = 3,
    NVME_RW_DSM_FREQ_WRITES     = 4,
    NVME_RW_DSM_FREQ_RW         = 5,
    NVME_RW_DSM_FREQ_ONCE       = 6,
    NVME_RW_DSM_FREQ_PREFETCH   = 7,
    NVME_RW_DSM_FREQ_TEMP       = 8,
    NVME_RW_DSM_LATENCY_NONE    = 0 << 4,
    NVME_RW_DSM_LATENCY_IDLE    = 1 << 4,
    NVME_RW_DSM_LATENCY_NORM    = 2 << 4,
    NVME_RW_DSM_LATENCY_LOW     = 3 << 4,
    NVME_RW_DSM_SEQ_REQ         = 1 << 6,
    NVME_RW_DSM_COMPRESSED      = 1 << 7,
    NVME_RW_PRINFO_PRACT        = 1 << 13,
    NVME_RW_PRINFO_PRCHK_GUARD  = 1 << 12,
    NVME_RW_PRINFO_PRCHK_APP    = 1 << 11,
    NVME_RW_PRINFO_PRCHK_REF    = 1 << 10,
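
/*
 * Illustrative sketch, not part of the original header: composing the
 * control word of a read/write command from the NVME_RW_LR / NVME_RW_FUA /
 * NVME_RW_PRINFO_* flags above.  The NVME_RW_DSM_* hints go into the
 * command's separate dataset management field, not into control.  The
 * helper name is hypothetical.
 */
static inline uint16_t nvme_rw_control_example(void)
{
    uint16_t control = 0;

    control |= NVME_RW_FUA;                 /* force unit access */
    control |= NVME_RW_LR;                  /* limited retry */
    control |= NVME_RW_PRINFO_PRCHK_GUARD;  /* check the end-to-end guard tag */

    return control;
}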
typedef struct NvmeDsmCmd {

    NVME_DSMGMT_IDR = 1 << 0,
    NVME_DSMGMT_IDW = 1 << 1,
    NVME_DSMGMT_AD  = 1 << 2,
typedef struct NvmeDsmRange {
enum NvmeAsyncEventRequest {
    NVME_AER_TYPE_ERROR                     = 0,
    NVME_AER_TYPE_SMART                     = 1,
    NVME_AER_TYPE_IO_SPECIFIC               = 6,
    NVME_AER_TYPE_VENDOR_SPECIFIC           = 7,
    NVME_AER_INFO_ERR_INVALID_SQ            = 0,
    NVME_AER_INFO_ERR_INVALID_DB            = 1,
    NVME_AER_INFO_ERR_DIAG_FAIL             = 2,
    NVME_AER_INFO_ERR_PERS_INTERNAL_ERR     = 3,
    NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR    = 4,
    NVME_AER_INFO_ERR_FW_IMG_LOAD_ERR       = 5,
    NVME_AER_INFO_SMART_RELIABILITY         = 0,
    NVME_AER_INFO_SMART_TEMP_THRESH         = 1,
    NVME_AER_INFO_SMART_SPARE_THRESH        = 2,
};
typedef struct NvmeAerResult {

typedef struct NvmeCqe {
enum NvmeStatusCodes {
    NVME_SUCCESS                = 0x0000,
    NVME_INVALID_OPCODE         = 0x0001,
    NVME_INVALID_FIELD          = 0x0002,
    NVME_CID_CONFLICT           = 0x0003,
    NVME_DATA_TRAS_ERROR        = 0x0004,
    NVME_POWER_LOSS_ABORT       = 0x0005,
    NVME_INTERNAL_DEV_ERROR     = 0x0006,
    NVME_CMD_ABORT_REQ          = 0x0007,
    NVME_CMD_ABORT_SQ_DEL       = 0x0008,
    NVME_CMD_ABORT_FAILED_FUSE  = 0x0009,
    NVME_CMD_ABORT_MISSING_FUSE = 0x000a,
    NVME_INVALID_NSID           = 0x000b,
    NVME_CMD_SEQ_ERROR          = 0x000c,
    NVME_LBA_RANGE              = 0x0080,
    NVME_CAP_EXCEEDED           = 0x0081,
    NVME_NS_NOT_READY           = 0x0082,
    NVME_NS_RESV_CONFLICT       = 0x0083,
    NVME_INVALID_CQID           = 0x0100,
    NVME_INVALID_QID            = 0x0101,
    NVME_MAX_QSIZE_EXCEEDED     = 0x0102,
    NVME_ACL_EXCEEDED           = 0x0103,
    NVME_RESERVED               = 0x0104,
    NVME_AER_LIMIT_EXCEEDED     = 0x0105,
    NVME_INVALID_FW_SLOT        = 0x0106,
    NVME_INVALID_FW_IMAGE       = 0x0107,
    NVME_INVALID_IRQ_VECTOR     = 0x0108,
    NVME_INVALID_LOG_ID         = 0x0109,
    NVME_INVALID_FORMAT         = 0x010a,
    NVME_FW_REQ_RESET           = 0x010b,
    NVME_INVALID_QUEUE_DEL      = 0x010c,
    NVME_FID_NOT_SAVEABLE       = 0x010d,
    NVME_FID_NOT_NSID_SPEC      = 0x010f,
    NVME_FW_REQ_SUSYSTEM_RESET  = 0x0110,
    NVME_CONFLICTING_ATTRS      = 0x0180,
    NVME_INVALID_PROT_INFO      = 0x0181,
    NVME_WRITE_TO_RO            = 0x0182,
    NVME_WRITE_FAULT            = 0x0280,
    NVME_UNRECOVERED_READ       = 0x0281,
    NVME_E2E_GUARD_ERROR        = 0x0282,
    NVME_E2E_APP_ERROR          = 0x0283,
    NVME_E2E_REF_ERROR          = 0x0284,
    NVME_CMP_FAILURE            = 0x0285,
    NVME_ACCESS_DENIED          = 0x0286,
    NVME_NO_COMPLETE            = 0xffff,
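
/*
 * Illustrative sketch, not part of the original header: in a completion
 * queue entry the status code occupies bits 15:1 of the status halfword
 * and bit 0 is the phase tag, so posting a completion typically packs the
 * two as below.  The helper and variable names are hypothetical.
 */
static inline uint16_t nvme_pack_status_example(uint16_t status, uint8_t phase)
{
    uint16_t cqe_status = (uint16_t)((status << 1) | (phase & 0x1));

    return cqe_status;
}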
typedef struct NvmeFwSlotInfoLog {
    uint8_t     reserved1[7];
    uint8_t     reserved2[448];
typedef struct NvmeErrorLog {
    uint64_t    error_count;
    uint16_t    status_field;
    uint16_t    param_error_location;
typedef struct NvmeSmartLog {
    uint8_t     critical_warning;
    uint8_t     temperature[2];
    uint8_t     available_spare;
    uint8_t     available_spare_threshold;
    uint8_t     percentage_used;
    uint8_t     reserved1[26];
    uint64_t    data_units_read[2];
    uint64_t    data_units_written[2];
    uint64_t    host_read_commands[2];
    uint64_t    host_write_commands[2];
    uint64_t    controller_busy_time[2];
    uint64_t    power_cycles[2];
    uint64_t    power_on_hours[2];
    uint64_t    unsafe_shutdowns[2];
    uint64_t    media_errors[2];
    uint64_t    number_of_error_log_entries[2];
    uint8_t     reserved2[320];
} NvmeSmartLog;
    NVME_SMART_SPARE                 = 1 << 0,
    NVME_SMART_TEMPERATURE           = 1 << 1,
    NVME_SMART_RELIABILITY           = 1 << 2,
    NVME_SMART_MEDIA_READ_ONLY       = 1 << 3,
    NVME_SMART_FAILED_VOLATILE_MEDIA = 1 << 4,

    NVME_LOG_ERROR_INFO   = 0x01,
    NVME_LOG_SMART_INFO   = 0x02,
    NVME_LOG_FW_SLOT_INFO = 0x03,
typedef struct NvmePSD {

typedef struct NvmeIdCtrl {
    uint8_t     rsvd255[178];
    uint8_t     rsvd511[248];
    uint8_t     rsvd703[174];
    uint8_t     rsvd2047[1344];
enum NvmeIdCtrlOacs {
    NVME_OACS_SECURITY  = 1 << 0,
    NVME_OACS_FORMAT    = 1 << 1,
    NVME_OACS_FW        = 1 << 2,
};

enum NvmeIdCtrlOncs {
    NVME_ONCS_COMPARE      = 1 << 0,
    NVME_ONCS_WRITE_UNCORR = 1 << 1,
    NVME_ONCS_DSM          = 1 << 2,
    NVME_ONCS_WRITE_ZEROS  = 1 << 3,
    NVME_ONCS_FEATURES     = 1 << 4,
    NVME_ONCS_RESRVATIONS  = 1 << 5,
    NVME_ONCS_TIMESTAMP    = 1 << 6,
};
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)
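
/*
 * Illustrative sketch, not part of the original header: the SQES/CQES bytes
 * of the identify controller data report the minimum required and maximum
 * supported queue entry sizes as powers of two, so a CC.IOSQES value is
 * acceptable when it falls inside that range.  A controller using the
 * 64-byte NvmeCmd and 16-byte NvmeCqe layouts above reports minimums of
 * 6 and 4 respectively.  The helper name is hypothetical.
 */
static inline int nvme_iosqes_valid_example(uint8_t sqes, uint8_t cc_iosqes)
{
    return cc_iosqes >= NVME_CTRL_SQES_MIN(sqes) &&
           cc_iosqes <= NVME_CTRL_SQES_MAX(sqes);
}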
typedef struct NvmeFeatureVal {
    uint32_t    arbitration;
    uint32_t    temp_thresh;
    uint32_t    volatile_wc;
    uint32_t    int_coalescing;
    uint32_t    *int_vector_config;
    uint32_t    write_atomicity;
    uint32_t    async_config;
    uint32_t    sw_prog_marker;
#define NVME_ARB_AB(arb)     (arb & 0x7)
#define NVME_ARB_LPW(arb)    ((arb >> 8) & 0xff)
#define NVME_ARB_MPW(arb)    ((arb >> 16) & 0xff)
#define NVME_ARB_HPW(arb)    ((arb >> 24) & 0xff)

#define NVME_INTC_THR(intc)  (intc & 0xff)
#define NVME_INTC_TIME(intc) ((intc >> 8) & 0xff)
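
/*
 * Illustrative sketch, not part of the original header: unpacking the
 * Arbitration and Interrupt Coalescing feature values with the macros
 * above.  The burst is encoded as a power of two, the per-priority
 * weights and the aggregation threshold are 0's based, and the
 * aggregation time is in 100 microsecond units.  Names are hypothetical.
 */
static inline void nvme_feature_decode_example(uint32_t arb, uint32_t intc)
{
    uint32_t burst        = 1u << NVME_ARB_AB(arb);  /* commands per arbitration turn */
    uint32_t low_weight   = NVME_ARB_LPW(arb) + 1;
    uint32_t med_weight   = NVME_ARB_MPW(arb) + 1;
    uint32_t high_weight  = NVME_ARB_HPW(arb) + 1;
    uint32_t coalesce_thr = NVME_INTC_THR(intc) + 1; /* completions per interrupt */
    uint32_t coalesce_us  = NVME_INTC_TIME(intc) * 100;

    (void)burst; (void)low_weight; (void)med_weight; (void)high_weight;
    (void)coalesce_thr; (void)coalesce_us;
}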
enum NvmeFeatureIds {
    NVME_ARBITRATION              = 0x1,
    NVME_POWER_MANAGEMENT         = 0x2,
    NVME_LBA_RANGE_TYPE           = 0x3,
    NVME_TEMPERATURE_THRESHOLD    = 0x4,
    NVME_ERROR_RECOVERY           = 0x5,
    NVME_VOLATILE_WRITE_CACHE     = 0x6,
    NVME_NUMBER_OF_QUEUES         = 0x7,
    NVME_INTERRUPT_COALESCING     = 0x8,
    NVME_INTERRUPT_VECTOR_CONF    = 0x9,
    NVME_WRITE_ATOMICITY          = 0xa,
    NVME_ASYNCHRONOUS_EVENT_CONF  = 0xb,
    NVME_TIMESTAMP                = 0xe,
    NVME_SOFTWARE_PROGRESS_MARKER = 0x80
};
typedef struct NvmeRangeType {

typedef struct NvmeLBAF {

typedef struct NvmeIdNs {
/* Deallocate Logical Block Features */
#define NVME_ID_NS_DLFEAT_GUARD_CRC(dlfeat)    ((dlfeat) & 0x10)
#define NVME_ID_NS_DLFEAT_WRITE_ZEROES(dlfeat) ((dlfeat) & 0x08)

#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR(dlfeat)   ((dlfeat) & 0x7)
#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_UNDEFINED 0
#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES    1
#define NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ONES      2

#define NVME_ID_NS_NSFEAT_THIN(nsfeat)   ((nsfeat & 0x1))
#define NVME_ID_NS_FLBAS_EXTENDED(flbas) ((flbas >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas)    ((flbas & 0xf))
#define NVME_ID_NS_MC_SEPARATE(mc)       ((mc >> 1) & 0x1)
#define NVME_ID_NS_MC_EXTENDED(mc)       ((mc & 0x1))
#define NVME_ID_NS_DPC_LAST_EIGHT(dpc)   ((dpc >> 4) & 0x1)
#define NVME_ID_NS_DPC_FIRST_EIGHT(dpc)  ((dpc >> 3) & 0x1)
#define NVME_ID_NS_DPC_TYPE_3(dpc)       ((dpc >> 2) & 0x1)
#define NVME_ID_NS_DPC_TYPE_2(dpc)       ((dpc >> 1) & 0x1)
#define NVME_ID_NS_DPC_TYPE_1(dpc)       ((dpc & 0x1))
#define NVME_ID_NS_DPC_TYPE_MASK         0x7
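
/*
 * Illustrative sketch, not part of the original header: NVME_ID_NS_FLBAS_INDEX
 * selects the active entry of the namespace's LBA format table, whose ds
 * member is the log2 of the LBA data size.  The lbaf array and ds field are
 * part of the full NvmeIdNs/NvmeLBAF definitions elided above; this helper
 * assumes they exist as in the complete header.
 */
static inline uint64_t nvme_lba_size_example(NvmeIdNs *id_ns)
{
    uint8_t index = NVME_ID_NS_FLBAS_INDEX(id_ns->flbas);

    return 1ull << id_ns->lbaf[index].ds;   /* e.g. ds = 9 -> 512-byte LBAs */
}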
static inline void _nvme_check_size(void)
{
    QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCreateSq) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
    QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
}