#ifndef HW_NVME_H
#define HW_NVME_H
#include "qemu/cutils.h"

typedef struct NvmeBar {
    uint64_t    cap;
    uint32_t    vs;
    uint32_t    intms;
    uint32_t    intmc;
    uint32_t    cc;
    uint32_t    rsvd1;
    uint32_t    csts;
    uint32_t    nssrc;
    uint32_t    aqa;
    uint64_t    asq;
    uint64_t    acq;
    uint32_t    cmbloc;
    uint32_t    cmbsz;
} NvmeBar;

enum NvmeCapShift {
    CAP_MQES_SHIFT     = 0,
    CAP_CQR_SHIFT      = 16,
    CAP_AMS_SHIFT      = 17,
    CAP_TO_SHIFT       = 24,
    CAP_DSTRD_SHIFT    = 32,
    CAP_NSSRS_SHIFT    = 33,
    CAP_CSS_SHIFT      = 37,
    CAP_MPSMIN_SHIFT   = 48,
    CAP_MPSMAX_SHIFT   = 52,
};

enum NvmeCapMask {
    CAP_MQES_MASK      = 0xffff,
    CAP_CQR_MASK       = 0x1,
    CAP_AMS_MASK       = 0x3,
    CAP_TO_MASK        = 0xff,
    CAP_DSTRD_MASK     = 0xf,
    CAP_NSSRS_MASK     = 0x1,
    CAP_CSS_MASK       = 0xff,
    CAP_MPSMIN_MASK    = 0xf,
    CAP_MPSMAX_MASK    = 0xf,
};

#define NVME_CAP_MQES(cap)  (((cap) >> CAP_MQES_SHIFT)   & CAP_MQES_MASK)
#define NVME_CAP_CQR(cap)   (((cap) >> CAP_CQR_SHIFT)    & CAP_CQR_MASK)
#define NVME_CAP_AMS(cap)   (((cap) >> CAP_AMS_SHIFT)    & CAP_AMS_MASK)
#define NVME_CAP_TO(cap)    (((cap) >> CAP_TO_SHIFT)     & CAP_TO_MASK)
#define NVME_CAP_DSTRD(cap) (((cap) >> CAP_DSTRD_SHIFT)  & CAP_DSTRD_MASK)
#define NVME_CAP_NSSRS(cap) (((cap) >> CAP_NSSRS_SHIFT)  & CAP_NSSRS_MASK)
#define NVME_CAP_CSS(cap)   (((cap) >> CAP_CSS_SHIFT)    & CAP_CSS_MASK)
#define NVME_CAP_MPSMIN(cap)(((cap) >> CAP_MPSMIN_SHIFT) & CAP_MPSMIN_MASK)
#define NVME_CAP_MPSMAX(cap)(((cap) >> CAP_MPSMAX_SHIFT) & CAP_MPSMAX_MASK)

#define NVME_CAP_SET_MQES(cap, val)   (cap |= (uint64_t)(val & CAP_MQES_MASK)  \
                                                        << CAP_MQES_SHIFT)
#define NVME_CAP_SET_CQR(cap, val)    (cap |= (uint64_t)(val & CAP_CQR_MASK)   \
                                                        << CAP_CQR_SHIFT)
#define NVME_CAP_SET_AMS(cap, val)    (cap |= (uint64_t)(val & CAP_AMS_MASK)   \
                                                        << CAP_AMS_SHIFT)
#define NVME_CAP_SET_TO(cap, val)     (cap |= (uint64_t)(val & CAP_TO_MASK)    \
                                                        << CAP_TO_SHIFT)
#define NVME_CAP_SET_DSTRD(cap, val)  (cap |= (uint64_t)(val & CAP_DSTRD_MASK) \
                                                        << CAP_DSTRD_SHIFT)
#define NVME_CAP_SET_NSSRS(cap, val)  (cap |= (uint64_t)(val & CAP_NSSRS_MASK) \
                                                        << CAP_NSSRS_SHIFT)
#define NVME_CAP_SET_CSS(cap, val)    (cap |= (uint64_t)(val & CAP_CSS_MASK)   \
                                                        << CAP_CSS_SHIFT)
#define NVME_CAP_SET_MPSMIN(cap, val) (cap |= (uint64_t)(val & CAP_MPSMIN_MASK)\
                                                        << CAP_MPSMIN_SHIFT)
#define NVME_CAP_SET_MPSMAX(cap, val) (cap |= (uint64_t)(val & CAP_MPSMAX_MASK)\
                                                        << CAP_MPSMAX_SHIFT)
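
/*
 * Illustrative sketch only (not code from this header): a controller
 * model would typically compose the read-only CAP register by OR-ing
 * fields into a zeroed value with the setters above.  The field values
 * below are example choices, not necessarily what the QEMU device uses:
 *
 *     uint64_t cap = 0;
 *     NVME_CAP_SET_MQES(cap, 0x7ff);  // 2048-entry queues (0's based)
 *     NVME_CAP_SET_CQR(cap, 1);       // queues must be physically contiguous
 *     NVME_CAP_SET_TO(cap, 0xf);      // ready timeout, in 500 ms units
 *     NVME_CAP_SET_CSS(cap, 1);       // NVM command set supported
 *     NVME_CAP_SET_MPSMAX(cap, 4);    // max host page size 2^(12+4) = 64 KiB
 */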

enum NvmeCcShift {
    CC_EN_SHIFT     = 0,
    CC_CSS_SHIFT    = 4,
    CC_MPS_SHIFT    = 7,
    CC_AMS_SHIFT    = 11,
    CC_SHN_SHIFT    = 14,
    CC_IOSQES_SHIFT = 16,
    CC_IOCQES_SHIFT = 20,
};

enum NvmeCcMask {
    CC_EN_MASK      = 0x1,
    CC_CSS_MASK     = 0x7,
    CC_MPS_MASK     = 0xf,
    CC_AMS_MASK     = 0x7,
    CC_SHN_MASK     = 0x3,
    CC_IOSQES_MASK  = 0xf,
    CC_IOCQES_MASK  = 0xf,
};

#define NVME_CC_EN(cc)     ((cc >> CC_EN_SHIFT)     & CC_EN_MASK)
#define NVME_CC_CSS(cc)    ((cc >> CC_CSS_SHIFT)    & CC_CSS_MASK)
#define NVME_CC_MPS(cc)    ((cc >> CC_MPS_SHIFT)    & CC_MPS_MASK)
#define NVME_CC_AMS(cc)    ((cc >> CC_AMS_SHIFT)    & CC_AMS_MASK)
#define NVME_CC_SHN(cc)    ((cc >> CC_SHN_SHIFT)    & CC_SHN_MASK)
#define NVME_CC_IOSQES(cc) ((cc >> CC_IOSQES_SHIFT) & CC_IOSQES_MASK)
#define NVME_CC_IOCQES(cc) ((cc >> CC_IOCQES_SHIFT) & CC_IOCQES_MASK)

enum NvmeCstsShift {
    CSTS_RDY_SHIFT   = 0,
    CSTS_CFS_SHIFT   = 1,
    CSTS_SHST_SHIFT  = 2,
    CSTS_NSSRO_SHIFT = 4,
};

enum NvmeCstsMask {
    CSTS_RDY_MASK   = 0x1,
    CSTS_CFS_MASK   = 0x1,
    CSTS_SHST_MASK  = 0x3,
    CSTS_NSSRO_MASK = 0x1,
};

enum NvmeCsts {
    NVME_CSTS_READY         = 1 << CSTS_RDY_SHIFT,
    NVME_CSTS_FAILED        = 1 << CSTS_CFS_SHIFT,
    NVME_CSTS_SHST_NORMAL   = 0 << CSTS_SHST_SHIFT,
    NVME_CSTS_SHST_PROGRESS = 1 << CSTS_SHST_SHIFT,
    NVME_CSTS_SHST_COMPLETE = 2 << CSTS_SHST_SHIFT,
    NVME_CSTS_NSSRO         = 1 << CSTS_NSSRO_SHIFT,
};

#define NVME_CSTS_RDY(csts)   ((csts >> CSTS_RDY_SHIFT)   & CSTS_RDY_MASK)
#define NVME_CSTS_CFS(csts)   ((csts >> CSTS_CFS_SHIFT)   & CSTS_CFS_MASK)
#define NVME_CSTS_SHST(csts)  ((csts >> CSTS_SHST_SHIFT)  & CSTS_SHST_MASK)
#define NVME_CSTS_NSSRO(csts) ((csts >> CSTS_NSSRO_SHIFT) & CSTS_NSSRO_MASK)
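
/*
 * How CC and CSTS interact (per the NVMe spec; the snippet is an
 * illustrative host-side sketch, not code from this header): the host
 * programs CC, sets CC.EN, and then polls CSTS.RDY until the controller
 * reports ready, e.g.
 *
 *     uint32_t cc = 0;
 *     cc |= 1 << CC_EN_SHIFT;        // enable the controller
 *     cc |= 6 << CC_IOSQES_SHIFT;    // SQ entry size 2^6 = 64 bytes
 *     cc |= 4 << CC_IOCQES_SHIFT;    // CQ entry size 2^4 = 16 bytes
 *     ... write cc to the CC register, then spin until
 *     ... NVME_CSTS_RDY(csts) == 1, giving up after CAP.TO * 500 ms.
 *
 * Clearing CC.EN reverses the handshake: the controller drops CSTS.RDY
 * once its reset completes.
 */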

enum NvmeAqaShift {
    AQA_ASQS_SHIFT = 0,
    AQA_ACQS_SHIFT = 16,
};

enum NvmeAqaMask {
    AQA_ASQS_MASK = 0xfff,
    AQA_ACQS_MASK = 0xfff,
};

#define NVME_AQA_ASQS(aqa) ((aqa >> AQA_ASQS_SHIFT) & AQA_ASQS_MASK)
#define NVME_AQA_ACQS(aqa) ((aqa >> AQA_ACQS_SHIFT) & AQA_ACQS_MASK)

enum NvmeCmblocShift {
    CMBLOC_BIR_SHIFT  = 0,
    CMBLOC_OFST_SHIFT = 12,
};

enum NvmeCmblocMask {
    CMBLOC_BIR_MASK  = 0x7,
    CMBLOC_OFST_MASK = 0xfffff,
};

#define NVME_CMBLOC_BIR(cmbloc)  ((cmbloc >> CMBLOC_BIR_SHIFT)  & \
                                  CMBLOC_BIR_MASK)
#define NVME_CMBLOC_OFST(cmbloc) ((cmbloc >> CMBLOC_OFST_SHIFT) & \
                                  CMBLOC_OFST_MASK)

#define NVME_CMBLOC_SET_BIR(cmbloc, val)  \
    (cmbloc |= (uint64_t)(val & CMBLOC_BIR_MASK) << CMBLOC_BIR_SHIFT)
#define NVME_CMBLOC_SET_OFST(cmbloc, val) \
    (cmbloc |= (uint64_t)(val & CMBLOC_OFST_MASK) << CMBLOC_OFST_SHIFT)

enum NvmeCmbszShift {
    CMBSZ_SQS_SHIFT   = 0,
    CMBSZ_CQS_SHIFT   = 1,
    CMBSZ_LISTS_SHIFT = 2,
    CMBSZ_RDS_SHIFT   = 3,
    CMBSZ_WDS_SHIFT   = 4,
    CMBSZ_SZU_SHIFT   = 8,
    CMBSZ_SZ_SHIFT    = 12,
};

enum NvmeCmbszMask {
    CMBSZ_SQS_MASK   = 0x1,
    CMBSZ_CQS_MASK   = 0x1,
    CMBSZ_LISTS_MASK = 0x1,
    CMBSZ_RDS_MASK   = 0x1,
    CMBSZ_WDS_MASK   = 0x1,
    CMBSZ_SZU_MASK   = 0xf,
    CMBSZ_SZ_MASK    = 0xfffff,
};

#define NVME_CMBSZ_SQS(cmbsz)   ((cmbsz >> CMBSZ_SQS_SHIFT)   & CMBSZ_SQS_MASK)
#define NVME_CMBSZ_CQS(cmbsz)   ((cmbsz >> CMBSZ_CQS_SHIFT)   & CMBSZ_CQS_MASK)
#define NVME_CMBSZ_LISTS(cmbsz) ((cmbsz >> CMBSZ_LISTS_SHIFT) & CMBSZ_LISTS_MASK)
#define NVME_CMBSZ_RDS(cmbsz)   ((cmbsz >> CMBSZ_RDS_SHIFT)   & CMBSZ_RDS_MASK)
#define NVME_CMBSZ_WDS(cmbsz)   ((cmbsz >> CMBSZ_WDS_SHIFT)   & CMBSZ_WDS_MASK)
#define NVME_CMBSZ_SZU(cmbsz)   ((cmbsz >> CMBSZ_SZU_SHIFT)   & CMBSZ_SZU_MASK)
#define NVME_CMBSZ_SZ(cmbsz)    ((cmbsz >> CMBSZ_SZ_SHIFT)    & CMBSZ_SZ_MASK)

#define NVME_CMBSZ_SET_SQS(cmbsz, val)   \
    (cmbsz |= (uint64_t)(val & CMBSZ_SQS_MASK) << CMBSZ_SQS_SHIFT)
#define NVME_CMBSZ_SET_CQS(cmbsz, val)   \
    (cmbsz |= (uint64_t)(val & CMBSZ_CQS_MASK) << CMBSZ_CQS_SHIFT)
#define NVME_CMBSZ_SET_LISTS(cmbsz, val) \
    (cmbsz |= (uint64_t)(val & CMBSZ_LISTS_MASK) << CMBSZ_LISTS_SHIFT)
#define NVME_CMBSZ_SET_RDS(cmbsz, val)   \
    (cmbsz |= (uint64_t)(val & CMBSZ_RDS_MASK) << CMBSZ_RDS_SHIFT)
#define NVME_CMBSZ_SET_WDS(cmbsz, val)   \
    (cmbsz |= (uint64_t)(val & CMBSZ_WDS_MASK) << CMBSZ_WDS_SHIFT)
#define NVME_CMBSZ_SET_SZU(cmbsz, val)   \
    (cmbsz |= (uint64_t)(val & CMBSZ_SZU_MASK) << CMBSZ_SZU_SHIFT)
#define NVME_CMBSZ_SET_SZ(cmbsz, val)    \
    (cmbsz |= (uint64_t)(val & CMBSZ_SZ_MASK) << CMBSZ_SZ_SHIFT)

#define NVME_CMBSZ_GETSIZE(cmbsz) \
    (NVME_CMBSZ_SZ(cmbsz) * (1 << (12 + 4 * NVME_CMBSZ_SZU(cmbsz))))
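
/*
 * Worked example for NVME_CMBSZ_GETSIZE(): SZU selects the granularity,
 * 1 << (12 + 4 * SZU) bytes (4 KiB, 64 KiB, 1 MiB, 16 MiB, ...), and SZ
 * is the number of such units.  So SZU = 2 and SZ = 128 describe a
 * 128 MiB controller memory buffer.  Note that the shift is done on a
 * plain int, so the macro is only meaningful for the small SZU values a
 * device model is likely to advertise.
 */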

typedef struct NvmeCmd {
    uint8_t     opcode;
    uint8_t     fuse;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    res1;
    uint64_t    mptr;
    uint64_t    prp1;
    uint64_t    prp2;
    uint32_t    cdw10;
    uint32_t    cdw11;
    uint32_t    cdw12;
    uint32_t    cdw13;
    uint32_t    cdw14;
    uint32_t    cdw15;
} NvmeCmd;

enum NvmeAdminCommands {
    NVME_ADM_CMD_DELETE_SQ      = 0x00,
    NVME_ADM_CMD_CREATE_SQ      = 0x01,
    NVME_ADM_CMD_GET_LOG_PAGE   = 0x02,
    NVME_ADM_CMD_DELETE_CQ      = 0x04,
    NVME_ADM_CMD_CREATE_CQ      = 0x05,
    NVME_ADM_CMD_IDENTIFY       = 0x06,
    NVME_ADM_CMD_ABORT          = 0x08,
    NVME_ADM_CMD_SET_FEATURES   = 0x09,
    NVME_ADM_CMD_GET_FEATURES   = 0x0a,
    NVME_ADM_CMD_ASYNC_EV_REQ   = 0x0c,
    NVME_ADM_CMD_ACTIVATE_FW    = 0x10,
    NVME_ADM_CMD_DOWNLOAD_FW    = 0x11,
    NVME_ADM_CMD_FORMAT_NVM     = 0x80,
    NVME_ADM_CMD_SECURITY_SEND  = 0x81,
    NVME_ADM_CMD_SECURITY_RECV  = 0x82,
};

enum NvmeIoCommands {
    NVME_CMD_FLUSH          = 0x00,
    NVME_CMD_WRITE          = 0x01,
    NVME_CMD_READ           = 0x02,
    NVME_CMD_WRITE_UNCOR    = 0x04,
    NVME_CMD_COMPARE        = 0x05,
    NVME_CMD_WRITE_ZEROS    = 0x08,
    NVME_CMD_DSM            = 0x09,
};

typedef struct NvmeDeleteQ {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[9];
    uint16_t    qid;
    uint16_t    rsvd10;
    uint32_t    rsvd11[5];
} NvmeDeleteQ;

typedef struct NvmeCreateCq {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[5];
    uint64_t    prp1;
    uint64_t    rsvd8;
    uint16_t    cqid;
    uint16_t    qsize;
    uint16_t    cq_flags;
    uint16_t    irq_vector;
    uint32_t    rsvd12[4];
} NvmeCreateCq;

#define NVME_CQ_FLAGS_PC(cq_flags)  (cq_flags & 0x1)
#define NVME_CQ_FLAGS_IEN(cq_flags) ((cq_flags >> 1) & 0x1)

typedef struct NvmeCreateSq {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    rsvd1[5];
    uint64_t    prp1;
    uint64_t    rsvd8;
    uint16_t    sqid;
    uint16_t    qsize;
    uint16_t    sq_flags;
    uint16_t    cqid;
    uint32_t    rsvd12[4];
} NvmeCreateSq;

#define NVME_SQ_FLAGS_PC(sq_flags)    (sq_flags & 0x1)
#define NVME_SQ_FLAGS_QPRIO(sq_flags) ((sq_flags >> 1) & 0x3)

enum NvmeQueueFlags {
    NVME_Q_PC           = 1,
    NVME_Q_PRIO_URGENT  = 0,
    NVME_Q_PRIO_HIGH    = 1,
    NVME_Q_PRIO_NORMAL  = 2,
    NVME_Q_PRIO_LOW     = 3,
};

typedef struct NvmeIdentify {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2[2];
    uint64_t    prp1;
    uint64_t    prp2;
    uint32_t    cns;
    uint32_t    rsvd11[5];
} NvmeIdentify;

typedef struct NvmeRwCmd {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2;
    uint64_t    mptr;
    uint64_t    prp1;
    uint64_t    prp2;
    uint64_t    slba;
    uint16_t    nlb;
    uint16_t    control;
    uint32_t    dsmgmt;
    uint32_t    reftag;
    uint16_t    apptag;
    uint16_t    appmask;
} NvmeRwCmd;

enum {
    NVME_RW_LR                  = 1 << 15,
    NVME_RW_FUA                 = 1 << 14,
    NVME_RW_DSM_FREQ_UNSPEC     = 0,
    NVME_RW_DSM_FREQ_TYPICAL    = 1,
    NVME_RW_DSM_FREQ_RARE       = 2,
    NVME_RW_DSM_FREQ_READS      = 3,
    NVME_RW_DSM_FREQ_WRITES     = 4,
    NVME_RW_DSM_FREQ_RW         = 5,
    NVME_RW_DSM_FREQ_ONCE       = 6,
    NVME_RW_DSM_FREQ_PREFETCH   = 7,
    NVME_RW_DSM_FREQ_TEMP       = 8,
    NVME_RW_DSM_LATENCY_NONE    = 0 << 4,
    NVME_RW_DSM_LATENCY_IDLE    = 1 << 4,
    NVME_RW_DSM_LATENCY_NORM    = 2 << 4,
    NVME_RW_DSM_LATENCY_LOW     = 3 << 4,
    NVME_RW_DSM_SEQ_REQ         = 1 << 6,
    NVME_RW_DSM_COMPRESSED      = 1 << 7,
    NVME_RW_PRINFO_PRACT        = 1 << 13,
    NVME_RW_PRINFO_PRCHK_GUARD  = 1 << 12,
    NVME_RW_PRINFO_PRCHK_APP    = 1 << 11,
    NVME_RW_PRINFO_PRCHK_REF    = 1 << 10,
};

typedef struct NvmeDsmCmd {
    uint8_t     opcode;
    uint8_t     flags;
    uint16_t    cid;
    uint32_t    nsid;
    uint64_t    rsvd2[2];
    uint64_t    prp1;
    uint64_t    prp2;
    uint32_t    nr;
    uint32_t    attributes;
    uint32_t    rsvd12[4];
} NvmeDsmCmd;

enum {
    NVME_DSMGMT_IDR = 1 << 0,
    NVME_DSMGMT_IDW = 1 << 1,
    NVME_DSMGMT_AD  = 1 << 2,
};

typedef struct NvmeDsmRange {
    uint32_t    cattr;
    uint32_t    nlb;
    uint64_t    slba;
} NvmeDsmRange;

enum NvmeAsyncEventRequest {
    NVME_AER_TYPE_ERROR                     = 0,
    NVME_AER_TYPE_SMART                     = 1,
    NVME_AER_TYPE_IO_SPECIFIC               = 6,
    NVME_AER_TYPE_VENDOR_SPECIFIC           = 7,
    NVME_AER_INFO_ERR_INVALID_SQ            = 0,
    NVME_AER_INFO_ERR_INVALID_DB            = 1,
    NVME_AER_INFO_ERR_DIAG_FAIL             = 2,
    NVME_AER_INFO_ERR_PERS_INTERNAL_ERR     = 3,
    NVME_AER_INFO_ERR_TRANS_INTERNAL_ERR    = 4,
    NVME_AER_INFO_ERR_FW_IMG_LOAD_ERR       = 5,
    NVME_AER_INFO_SMART_RELIABILITY         = 0,
    NVME_AER_INFO_SMART_TEMP_THRESH         = 1,
    NVME_AER_INFO_SMART_SPARE_THRESH        = 2,
};

typedef struct NvmeAerResult {
    uint8_t event_type;
    uint8_t event_info;
    uint8_t log_page;
    uint8_t resv;
} NvmeAerResult;

typedef struct NvmeCqe {
    uint32_t    result;
    uint32_t    rsvd;
    uint16_t    sq_head;
    uint16_t    sq_id;
    uint16_t    cid;
    uint16_t    status;
} NvmeCqe;

enum NvmeStatusCodes {
    NVME_SUCCESS                = 0x0000,
    NVME_INVALID_OPCODE         = 0x0001,
    NVME_INVALID_FIELD          = 0x0002,
    NVME_CID_CONFLICT           = 0x0003,
    NVME_DATA_TRAS_ERROR        = 0x0004,
    NVME_POWER_LOSS_ABORT       = 0x0005,
    NVME_INTERNAL_DEV_ERROR     = 0x0006,
    NVME_CMD_ABORT_REQ          = 0x0007,
    NVME_CMD_ABORT_SQ_DEL       = 0x0008,
    NVME_CMD_ABORT_FAILED_FUSE  = 0x0009,
    NVME_CMD_ABORT_MISSING_FUSE = 0x000a,
    NVME_INVALID_NSID           = 0x000b,
    NVME_CMD_SEQ_ERROR          = 0x000c,
    NVME_LBA_RANGE              = 0x0080,
    NVME_CAP_EXCEEDED           = 0x0081,
    NVME_NS_NOT_READY           = 0x0082,
    NVME_NS_RESV_CONFLICT       = 0x0083,
    NVME_INVALID_CQID           = 0x0100,
    NVME_INVALID_QID            = 0x0101,
    NVME_MAX_QSIZE_EXCEEDED     = 0x0102,
    NVME_ACL_EXCEEDED           = 0x0103,
    NVME_RESERVED               = 0x0104,
    NVME_AER_LIMIT_EXCEEDED     = 0x0105,
    NVME_INVALID_FW_SLOT        = 0x0106,
    NVME_INVALID_FW_IMAGE       = 0x0107,
    NVME_INVALID_IRQ_VECTOR     = 0x0108,
    NVME_INVALID_LOG_ID         = 0x0109,
    NVME_INVALID_FORMAT         = 0x010a,
    NVME_FW_REQ_RESET           = 0x010b,
    NVME_INVALID_QUEUE_DEL      = 0x010c,
    NVME_FID_NOT_SAVEABLE       = 0x010d,
    NVME_FID_NOT_NSID_SPEC      = 0x010f,
    NVME_FW_REQ_SUSYSTEM_RESET  = 0x0110,
    NVME_CONFLICTING_ATTRS      = 0x0180,
    NVME_INVALID_PROT_INFO      = 0x0181,
    NVME_WRITE_TO_RO            = 0x0182,
    NVME_WRITE_FAULT            = 0x0280,
    NVME_UNRECOVERED_READ       = 0x0281,
    NVME_E2E_GUARD_ERROR        = 0x0282,
    NVME_E2E_APP_ERROR          = 0x0283,
    NVME_E2E_REF_ERROR          = 0x0284,
    NVME_CMP_FAILURE            = 0x0285,
    NVME_ACCESS_DENIED          = 0x0286,
    NVME_MORE                   = 0x2000,
    NVME_DNR                    = 0x4000,
    NVME_NO_COMPLETE            = 0xffff,
};

typedef struct NvmeFwSlotInfoLog {
    uint8_t     afi;
    uint8_t     reserved1[7];
    uint8_t     frs1[8];
    uint8_t     frs2[8];
    uint8_t     frs3[8];
    uint8_t     frs4[8];
    uint8_t     frs5[8];
    uint8_t     frs6[8];
    uint8_t     frs7[8];
    uint8_t     reserved2[448];
} NvmeFwSlotInfoLog;

typedef struct NvmeErrorLog {
    uint64_t    error_count;
    uint16_t    sqid;
    uint16_t    cid;
    uint16_t    status_field;
    uint16_t    param_error_location;
    uint64_t    lba;
    uint32_t    nsid;
    uint8_t     vs;
    uint8_t     resv[35];
} NvmeErrorLog;

typedef struct NvmeSmartLog {
    uint8_t     critical_warning;
    uint8_t     temperature[2];
    uint8_t     available_spare;
    uint8_t     available_spare_threshold;
    uint8_t     percentage_used;
    uint8_t     reserved1[26];
    uint64_t    data_units_read[2];
    uint64_t    data_units_written[2];
    uint64_t    host_read_commands[2];
    uint64_t    host_write_commands[2];
    uint64_t    controller_busy_time[2];
    uint64_t    power_cycles[2];
    uint64_t    power_on_hours[2];
    uint64_t    unsafe_shutdowns[2];
    uint64_t    media_errors[2];
    uint64_t    number_of_error_log_entries[2];
    uint8_t     reserved2[320];
} NvmeSmartLog;

enum NvmeSmartWarn {
    NVME_SMART_SPARE                  = 1 << 0,
    NVME_SMART_TEMPERATURE            = 1 << 1,
    NVME_SMART_RELIABILITY            = 1 << 2,
    NVME_SMART_MEDIA_READ_ONLY        = 1 << 3,
    NVME_SMART_FAILED_VOLATILE_MEDIA  = 1 << 4,
};

enum LogIdentifier {
    NVME_LOG_ERROR_INFO     = 0x01,
    NVME_LOG_SMART_INFO     = 0x02,
    NVME_LOG_FW_SLOT_INFO   = 0x03,
};

typedef struct NvmePSD {
    uint16_t    mp;
    uint16_t    reserved;
    uint32_t    enlat;
    uint32_t    exlat;
    uint8_t     rrt;
    uint8_t     rrl;
    uint8_t     rwt;
    uint8_t     rwl;
    uint8_t     resv[16];
} NvmePSD;

typedef struct NvmeIdCtrl {
    uint16_t    vid;
    uint16_t    ssvid;
    uint8_t     sn[20];
    uint8_t     mn[40];
    uint8_t     fr[8];
    uint8_t     rab;
    uint8_t     ieee[3];
    uint8_t     cmic;
    uint8_t     mdts;
    uint8_t     rsvd255[178];
    uint16_t    oacs;
    uint8_t     acl;
    uint8_t     aerl;
    uint8_t     frmw;
    uint8_t     lpa;
    uint8_t     elpe;
    uint8_t     npss;
    uint8_t     rsvd511[248];
    uint8_t     sqes;
    uint8_t     cqes;
    uint16_t    rsvd515;
    uint32_t    nn;
    uint16_t    oncs;
    uint16_t    fuses;
    uint8_t     fna;
    uint8_t     vwc;
    uint16_t    awun;
    uint16_t    awupf;
    uint8_t     rsvd703[174];
    uint8_t     rsvd2047[1344];
    NvmePSD     psd[32];
    uint8_t     vs[1024];
} NvmeIdCtrl;

enum NvmeIdCtrlOacs {
    NVME_OACS_SECURITY  = 1 << 0,
    NVME_OACS_FORMAT    = 1 << 1,
    NVME_OACS_FW        = 1 << 2,
};

enum NvmeIdCtrlOncs {
    NVME_ONCS_COMPARE       = 1 << 0,
    NVME_ONCS_WRITE_UNCORR  = 1 << 1,
    NVME_ONCS_DSM           = 1 << 2,
    NVME_ONCS_WRITE_ZEROS   = 1 << 3,
    NVME_ONCS_FEATURES      = 1 << 4,
    NVME_ONCS_RESRVATIONS   = 1 << 5,
};

#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
#define NVME_CTRL_CQES_MAX(cqes) (((cqes) >> 4) & 0xf)
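
/*
 * SQES/CQES in the Identify Controller data pack the minimum (required)
 * and maximum queue entry sizes as powers of two: the low nibble is the
 * minimum and the high nibble the maximum.  For example, sqes == 0x66
 * advertises 64-byte submission entries only (2^6) and cqes == 0x44
 * advertises 16-byte completion entries only (2^4), matching
 * sizeof(NvmeCmd) and sizeof(NvmeCqe) above.
 */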

typedef struct NvmeFeatureVal {
    uint32_t    arbitration;
    uint32_t    power_mgmt;
    uint32_t    temp_thresh;
    uint32_t    err_rec;
    uint32_t    volatile_wc;
    uint32_t    num_queues;
    uint32_t    int_coalescing;
    uint32_t    *int_vector_config;
    uint32_t    write_atomicity;
    uint32_t    async_config;
    uint32_t    sw_prog_marker;
} NvmeFeatureVal;

#define NVME_ARB_AB(arb)    (arb & 0x7)
#define NVME_ARB_LPW(arb)   ((arb >> 8) & 0xff)
#define NVME_ARB_MPW(arb)   ((arb >> 16) & 0xff)
#define NVME_ARB_HPW(arb)   ((arb >> 24) & 0xff)

#define NVME_INTC_THR(intc)     (intc & 0xff)
#define NVME_INTC_TIME(intc)    ((intc >> 8) & 0xff)

enum NvmeFeatureIds {
    NVME_ARBITRATION                = 0x1,
    NVME_POWER_MANAGEMENT           = 0x2,
    NVME_LBA_RANGE_TYPE             = 0x3,
    NVME_TEMPERATURE_THRESHOLD      = 0x4,
    NVME_ERROR_RECOVERY             = 0x5,
    NVME_VOLATILE_WRITE_CACHE       = 0x6,
    NVME_NUMBER_OF_QUEUES           = 0x7,
    NVME_INTERRUPT_COALESCING       = 0x8,
    NVME_INTERRUPT_VECTOR_CONF      = 0x9,
    NVME_WRITE_ATOMICITY            = 0xa,
    NVME_ASYNCHRONOUS_EVENT_CONF    = 0xb,
    NVME_SOFTWARE_PROGRESS_MARKER   = 0x80
};

typedef struct NvmeRangeType {
    uint8_t     type;
    uint8_t     attributes;
    uint8_t     rsvd2[14];
    uint64_t    slba;
    uint64_t    nlb;
    uint8_t     guid[16];
    uint8_t     rsvd48[16];
} NvmeRangeType;

typedef struct NvmeLBAF {
    uint16_t    ms;
    uint8_t     ds;
    uint8_t     rp;
} NvmeLBAF;

typedef struct NvmeIdNs {
    uint64_t    nsze;
    uint64_t    ncap;
    uint64_t    nuse;
    uint8_t     nsfeat;
    uint8_t     nlbaf;
    uint8_t     flbas;
    uint8_t     mc;
    uint8_t     dpc;
    uint8_t     dps;
    uint8_t     res30[98];
    NvmeLBAF    lbaf[16];
    uint8_t     res192[192];
    uint8_t     vs[3712];
} NvmeIdNs;

#define NVME_ID_NS_NSFEAT_THIN(nsfeat)      ((nsfeat & 0x1))
#define NVME_ID_NS_FLBAS_EXTENDED(flbas)    ((flbas >> 4) & 0x1)
#define NVME_ID_NS_FLBAS_INDEX(flbas)       ((flbas & 0xf))
#define NVME_ID_NS_MC_SEPARATE(mc)          ((mc >> 1) & 0x1)
#define NVME_ID_NS_MC_EXTENDED(mc)          ((mc & 0x1))
#define NVME_ID_NS_DPC_LAST_EIGHT(dpc)      ((dpc >> 4) & 0x1)
#define NVME_ID_NS_DPC_FIRST_EIGHT(dpc)     ((dpc >> 3) & 0x1)
#define NVME_ID_NS_DPC_TYPE_3(dpc)          ((dpc >> 2) & 0x1)
#define NVME_ID_NS_DPC_TYPE_2(dpc)          ((dpc >> 1) & 0x1)
#define NVME_ID_NS_DPC_TYPE_1(dpc)          ((dpc & 0x1))
#define NVME_ID_NS_DPC_TYPE_MASK            0x7
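
/*
 * Illustrative example (not code from this header): the active LBA data
 * size of a namespace follows from FLBAS and the LBA format table:
 *
 *     const NvmeIdNs *id = ...;                   // hypothetical pointer
 *     uint8_t  idx    = NVME_ID_NS_FLBAS_INDEX(id->flbas);
 *     uint32_t lba_sz = 1u << id->lbaf[idx].ds;   // 'ds' is log2(data size)
 *
 * so ds == 9 gives 512-byte blocks and ds == 12 gives 4 KiB blocks.
 */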

enum NvmeIdNsDps {
    DPS_TYPE_NONE   = 0,
    DPS_TYPE_1      = 1,
    DPS_TYPE_2      = 2,
    DPS_TYPE_3      = 3,
    DPS_TYPE_MASK   = 0x7,
    DPS_FIRST_EIGHT = 8,
};

static inline void _nvme_check_size(void)
{
    QEMU_BUILD_BUG_ON(sizeof(NvmeAerResult) != 4);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCqe) != 16);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDsmRange) != 16);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDeleteQ) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCreateCq) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeCreateSq) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
    QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
    QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
    QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
}

typedef struct NvmeAsyncEvent {
    QSIMPLEQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

typedef struct NvmeRequest {
    struct NvmeSQueue       *sq;
    BlockAIOCB              *aiocb;
    uint16_t                status;
    bool                    has_sg;
    NvmeCqe                 cqe;
    BlockAcctCookie         acct;
    QEMUSGList              qsg;
    QTAILQ_ENTRY(NvmeRequest)entry;
} NvmeRequest;

typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t    sqid;
    uint16_t    cqid;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    size;
    uint64_t    dma_addr;
    QEMUTimer   *timer;
    NvmeRequest *io_req;
    QTAILQ_HEAD(sq_req_list, NvmeRequest) req_list;
    QTAILQ_HEAD(out_req_list, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t     phase;
    uint16_t    cqid;
    uint16_t    irq_enabled;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    vector;
    uint32_t    size;
    uint64_t    dma_addr;
    QEMUTimer   *timer;
    QTAILQ_HEAD(sq_list, NvmeSQueue) sq_list;
    QTAILQ_HEAD(cq_req_list, NvmeRequest) req_list;
} NvmeCQueue;

typedef struct NvmeNamespace {
    NvmeIdNs        id_ns;
} NvmeNamespace;

#define TYPE_NVME "nvme"
#define NVME(obj) \
        OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

typedef struct NvmeCtrl {
    PCIDevice    parent_obj;
    MemoryRegion iomem;
    MemoryRegion ctrl_mem;
    NvmeBar      bar;
    BlockConf    conf;

    uint32_t    page_size;
    uint16_t    page_bits;
    uint16_t    max_prp_ents;
    uint16_t    cqe_size;
    uint16_t    sqe_size;
    uint32_t    reg_size;
    uint32_t    num_namespaces;
    uint32_t    num_queues;
    uint32_t    max_q_ents;
    uint64_t    ns_size;
    uint32_t    cmb_size_mb;
    uint32_t    cmbsz;
    uint32_t    cmbloc;
    uint8_t     *cmbuf;

    char            *serial;
    NvmeNamespace   *namespaces;
    NvmeSQueue      **sq;
    NvmeCQueue      **cq;
    NvmeSQueue      admin_sq;
    NvmeCQueue      admin_cq;
    NvmeIdCtrl      id_ctrl;
} NvmeCtrl;

#endif /* HW_NVME_H */