/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>
 *
 * Note cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed to
 * be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * The cmb_size_mb= and pmrdev= options are mutually exclusive due to a
 * limitation in the available BARs. cmb_size_mb= takes precedence over
 * pmrdev= when both are provided.
 *
 * Enabling PMR emulation can be achieved by pointing pmrdev= to a
 * memory-backend-file. For example:
 *
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
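 *
 * A Controller Memory Buffer can be enabled in a similar fashion; as a
 * purely illustrative example (parameter values are arbitrary):
 *
 * -device nvme,serial=deadbeef,id=nvme0,cmb_size_mb=64 \
 * -device nvme-ns,drive=<drive_id>,bus=nvme0,nsid=1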
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 */
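/*
 * Illustrative example of the two AER knobs above (values are arbitrary):
 * "-device nvme,...,aerl=3,aer_max_queued=64" allows up to four Asynchronous
 * Event Request commands to be outstanding at once (aerl is 0's based) and
 * buffers at most 64 events while none are outstanding.
 */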
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
#include "nvme-ns.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}
static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
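/*
 * Interrupt delivery in the helpers below is either MSI-X (one vector per
 * completion queue) or, as a fallback, the legacy INTx pin: a per-vector bit
 * is tracked in irq_status and the pin stays asserted while any bit not
 * masked via INTMS remains pending.
 */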
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}
static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}
static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}
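/*
 * PRP mapping note (worked example, not taken from the code below): with a
 * 4096-byte controller page size, a 16 KiB transfer that starts on a page
 * boundary needs PRP1 plus three further page pointers, so PRP2 then points
 * to a PRP list rather than to a data page; a transfer that fits in at most
 * two pages uses PRP2 as a direct data pointer instead.
 */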
static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;
    int ret;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                return NVME_DATA_TRAS_ERROR;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        return NVME_DATA_TRAS_ERROR;
                    }
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_PRP_OFFSET | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}
/*
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped in len.
 */
static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
                                  QEMUIOVector *iov,
                                  NvmeSglDescriptor *segment, uint64_t nsgld,
                                  size_t *len, NvmeRequest *req)
{
    dma_addr_t addr, trans_len;
    uint32_t dlen;
    uint16_t status;

    for (int i = 0; i < nsgld; i++) {
        uint8_t type = NVME_SGL_TYPE(segment[i].type);

        switch (type) {
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            if (req->cmd.opcode == NVME_CMD_WRITE) {
                continue;
            }
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
            break;
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
        default:
            return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
        }

        dlen = le32_to_cpu(segment[i].len);

        if (!dlen) {
            continue;
        }

        if (*len == 0) {
            /*
             * All data has been mapped, but the SGL contains additional
             * segments and/or descriptors. The controller might accept
             * ignoring the rest of the SGL.
             */
            uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
            if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
                break;
            }

            trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        trans_len = MIN(*len, dlen);

        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
            goto next;
        }

        addr = le64_to_cpu(segment[i].addr);

        if (UINT64_MAX - addr < dlen) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        status = nvme_map_addr(n, qsg, iov, addr, trans_len);
        if (status) {
            return status;
        }

next:
        *len -= trans_len;
    }

    return NVME_SUCCESS;
}
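/*
 * Worked sizing note for the chunked segment reads in nvme_map_sgl() below:
 * an NVMe SGL descriptor is 16 bytes, so reading SEG_CHUNK_SIZE (256)
 * descriptors at a time moves exactly 4 KiB per nvme_addr_read() call,
 * regardless of how long the guest-provided segment chain is.
 */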
static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                             NvmeSglDescriptor sgl, size_t len,
                             NvmeRequest *req)
{
    /*
     * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
     * dynamically allocating a potentially huge SGL. The spec allows the SGL
     * to be larger (as in number of bytes required to describe the SGL
     * descriptors and segment chain) than the command transfer size, so it is
     * not bounded by MDTS.
     */
    const int SEG_CHUNK_SIZE = 256;

    NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
    uint64_t nsgld;
    uint32_t seg_len;
    uint16_t status;
    bool sgl_in_cmb = false;
    hwaddr addr;
    int ret;

    sgld = &sgl;
    addr = le64_to_cpu(sgl.addr);

    trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);

    /*
     * If the entire transfer can be described with a single data block it can
     * be mapped directly.
     */
    if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
        status = nvme_map_sgl_data(n, qsg, iov, sgld, 1, &len, req);
        if (status) {
            goto unmap;
        }

        goto out;
    }

    /*
     * If the segment is located in the CMB, the submission queue of the
     * request must also reside there.
     */
    if (nvme_addr_is_cmb(n, addr)) {
        if (!nvme_addr_is_cmb(n, req->sq->dma_addr)) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        sgl_in_cmb = true;
    }

    for (;;) {
        switch (NVME_SGL_TYPE(sgld->type)) {
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            break;
        default:
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        seg_len = le32_to_cpu(sgld->len);

        /* check the length of the (Last) Segment descriptor */
        if ((!seg_len || seg_len & 0xf) &&
            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        if (UINT64_MAX - addr < seg_len) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        nsgld = seg_len / sizeof(NvmeSglDescriptor);

        while (nsgld > SEG_CHUNK_SIZE) {
            if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
                trace_pci_nvme_err_addr_read(addr);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }

            status = nvme_map_sgl_data(n, qsg, iov, segment, SEG_CHUNK_SIZE,
                                       &len, req);
            if (status) {
                goto unmap;
            }

            nsgld -= SEG_CHUNK_SIZE;
            addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
        }

        ret = nvme_addr_read(n, addr, segment, nsgld *
                             sizeof(NvmeSglDescriptor));
        if (ret) {
            trace_pci_nvme_err_addr_read(addr);
            status = NVME_DATA_TRAS_ERROR;
            goto unmap;
        }

        last_sgld = &segment[nsgld - 1];

        /*
         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
         * then we are done mapping this segment.
         */
        switch (NVME_SGL_TYPE(last_sgld->type)) {
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
            if (status) {
                goto unmap;
            }

            goto out;

        default:
            break;
        }

        /*
         * If the last descriptor was not a Data Block or Bit Bucket, then the
         * current segment must not be a Last Segment.
         */
        if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
            status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
            goto unmap;
        }

        sgld = last_sgld;
        addr = le64_to_cpu(sgld->addr);

        /*
         * Do not map the last descriptor; it will be a Segment or Last Segment
         * descriptor and is handled by the next iteration.
         */
        status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld - 1, &len, req);
        if (status) {
            goto unmap;
        }

        /*
         * If the next segment is in the CMB, make sure that the sgl was
         * already located there.
         */
        if (sgl_in_cmb != nvme_addr_is_cmb(n, addr)) {
            status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
            goto unmap;
        }
    }

out:
    /* if there is any residual left in len, the SGL was too short */
    if (len) {
        status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        goto unmap;
    }

    return NVME_SUCCESS;

unmap:
    if (iov->iov) {
        qemu_iovec_destroy(iov);
    }

    if (qsg->sg) {
        qemu_sglist_destroy(qsg);
    }

    return status;
}
static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    uint64_t prp1, prp2;

    switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
    case NVME_PSDT_PRP:
        prp1 = le64_to_cpu(req->cmd.dptr.prp1);
        prp2 = le64_to_cpu(req->cmd.dptr.prp2);

        return nvme_map_prp(n, prp1, prp2, len, req);
    case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
    case NVME_PSDT_SGL_MPTR_SGL:
        /* SGLs shall not be used for Admin commands in NVMe over PCIe */
        if (!req->sq->sqid) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        return nvme_map_sgl(n, &req->qsg, &req->iov, req->cmd.dptr.sgl, len,
                            req);
    default:
        return NVME_INVALID_FIELD;
    }
}
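/*
 * Note on the request mapping state consumed by nvme_dma() below: exactly one
 * of req->qsg (host memory described by a scatter/gather list) or req->iov
 * (CMB-backed memory copied through an iovec) is populated by
 * nvme_map_dptr(), which is why the helper asserts that only one of them
 * carries data.
 */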
static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                         DMADirection dir, NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_dptr(n, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}
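/*
 * Completion entries posted below carry the CQ phase tag in bit 0 of the
 * status field ((status << 1) | cq->phase); the phase flips each time the
 * tail wraps (see nvme_inc_cq_tail()), which is how the host distinguishes
 * newly posted entries from stale ones without an explicit valid bit.
 */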
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;
    int ret;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                            sizeof(req->cqe));
        if (ret) {
            trace_pci_nvme_err_addr_write(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        QTAILQ_REMOVE(&cq->req_list, req, entry);
        nvme_inc_cq_tail(cq);
        nvme_req_exit(req);
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}
static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
                                          req->status);

    if (req->status) {
        trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns),
                                      req->status, req->cmd.opcode);
    }

    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
static void nvme_process_aers(void *opaque)
{
    NvmeCtrl *n = opaque;
    NvmeAsyncEvent *event, *next;

    trace_pci_nvme_process_aers(n->aer_queued);

    QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
        NvmeRequest *req;
        NvmeAerResult *result;

        /* can't post cqe if there is nothing to complete */
        if (!n->outstanding_aers) {
            trace_pci_nvme_no_outstanding_aers();
            break;
        }

        /* ignore if masked (cqe posted, but event not cleared) */
        if (n->aer_mask & (1 << event->result.event_type)) {
            trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
            continue;
        }

        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        n->aer_queued--;

        n->aer_mask |= 1 << event->result.event_type;
        n->outstanding_aers--;

        req = n->aer_reqs[n->outstanding_aers];

        result = (NvmeAerResult *) &req->cqe.result;
        result->event_type = event->result.event_type;
        result->event_info = event->result.event_info;
        result->log_page = event->result.log_page;
        g_free(event);

        trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
                                    result->log_page);

        nvme_enqueue_req_completion(&n->admin_cq, req);
    }
}
static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
                               uint8_t event_info, uint8_t log_page)
{
    NvmeAsyncEvent *event;

    trace_pci_nvme_enqueue_event(event_type, event_info, log_page);

    if (n->aer_queued == n->params.aer_max_queued) {
        trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
        return;
    }

    event = g_new(NvmeAsyncEvent, 1);
    event->result = (NvmeAerResult) {
        .event_type = event_type,
        .event_info = event_info,
        .log_page   = log_page,
    };

    QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
    n->aer_queued++;

    nvme_process_aers(n);
}
static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
{
    n->aer_mask &= ~(1 << event_type);
    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }
}
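/*
 * MDTS worked example (illustrative values only): with a 4096-byte controller
 * page size and mdts=7, the check below limits a single data transfer to
 * 4096 << 7 = 512 KiB; mdts=0 means "no limit" and skips the check entirely.
 */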
static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
{
    uint8_t mdts = n->params.mdts;

    if (mdts && len > n->page_size << mdts) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
                                         uint64_t slba, uint32_t nlb)
{
    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);

    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;

    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    Error *local_err = NULL;

    trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));

    if (!ret) {
        block_acct_done(stats, acct);
    } else {
        uint16_t status;

        block_acct_failed(stats, acct);

        switch (req->cmd.opcode) {
        case NVME_CMD_READ:
            status = NVME_UNRECOVERED_READ;
            break;

        case NVME_CMD_WRITE:
        case NVME_CMD_WRITE_ZEROES:
            status = NVME_WRITE_FAULT;
            break;

        default:
            status = NVME_INTERNAL_DEV_ERROR;
            break;
        }

        trace_pci_nvme_err_aio(nvme_cid(req), strerror(ret), status);

        error_setg_errno(&local_err, -ret, "aio failed");
        error_report_err(local_err);

        req->status = status;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = nvme_l2b(ns, slba);
    uint32_t count = nvme_l2b(ns, nlb);
    uint16_t status;

    trace_pci_nvme_write_zeroes(nvme_cid(req), nvme_nsid(ns), slba, nlb);

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(req->ns->blkconf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);

    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t data_offset = nvme_l2b(ns, slba);
    enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
        BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
    BlockBackend *blk = ns->blkconf.blk;
    uint16_t status;

    trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
                      nvme_nsid(ns), nlb, data_size, slba);

    status = nvme_check_mdts(n, data_size);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
        goto invalid;
    }

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        goto invalid;
    }

    status = nvme_map_dptr(n, data_size, req);
    if (status) {
        goto invalid;
    }

    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
    if (req->qsg.sg) {
        if (acct == BLOCK_ACCT_WRITE) {
            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
        } else {
            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
        }
    } else {
        if (acct == BLOCK_ACCT_WRITE) {
            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
                                         nvme_rw_cb, req);
        } else {
            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
                                        nvme_rw_cb, req);
        }
    }

    return NVME_NO_COMPLETE;

invalid:
    block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
    return status;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_ADMIN_ONLY) {
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    req->ns = nvme_ns(n, nsid);
    if (unlikely(!req->ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}
static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        blk_aio_cancel(r->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    NvmeCQueue *cq;
    int i;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}
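/*
 * Reminder for the queue-creation handlers that follow: QSIZE in the Create
 * I/O Submission/Completion Queue commands is a 0's based value, so a field
 * value of 0 means one entry and the init helpers are called with qsize + 1.
 */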
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
        n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };
    uint32_t trans_len;
    NvmeNamespace *ns;
    time_t current_ms;

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
        nvme_set_blk_stats(ns, &stats);
    } else {
        int i;

        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }
            nvme_set_blk_stats(ns, &stats);
        }
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
                                                        1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), len);
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}
static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    ret = msix_vector_use(&n->parent_obj, vector);
    assert(ret == 0);
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
        n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    NvmeIdNs *id_ns, inactive = { 0 };
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        id_ns = &inactive;
    } else {
        id_ns = &ns->id_ns;
    }

    return nvme_dma(n, (uint8_t *)id_ns, sizeof(NvmeIdNs),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint32_t *list;
    uint16_t ret;
    int j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    list = g_malloc0(data_len);
    for (int i = 1; i <= n->num_namespaces; i++) {
        if (i <= min_nsid || !nvme_ns(n, i)) {
            continue;
        }
        list[j++] = cpu_to_le32(i);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
                   req);
    g_free(list);
    return ret;
}
static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE];

    struct data {
        struct {
            NvmeIdNsDescr hdr;
            uint32_t v;
        } uuid;
    };

    struct data *ns_descrs = (struct data *)list;

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (unlikely(!nvme_ns(n, nsid))) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    memset(list, 0x0, sizeof(list));

    /*
     * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
     * structure, a Namespace UUID (nidt = 0x3) must be reported in the
     * Namespace Identification Descriptor. Add a very basic Namespace UUID
     * here.
     */
    ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
    ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
    stl_be_p(&ns_descrs->uuid.v, nsid);

    return nvme_dma(n, list, NVME_IDENTIFY_DATA_SIZE,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
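/*
 * Timestamp feature layout used by the helpers below: the 64-bit value
 * carries a 48-bit millisecond timestamp in its low bits plus Synch and
 * Timestamp Origin fields; the origin is reported as 0x01 ("set with a Set
 * Features command") once the host has programmed a non-zero timestamp.
 */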
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;
    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_VOLATILE_WRITE_CACHE:
        result = n->features.vwc;
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }

        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = NULL;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (((n->temperature >= n->features.temp_thresh_hi) ||
             (n->temperature <= n->features.temp_thresh_low)) &&
            NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
            nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
                               NVME_AER_INFO_SMART_TEMP_THRESH,
                               NVME_LOG_SMART_INFO);
        }

        break;
    case NVME_VOLATILE_WRITE_CACHE:
        n->features.vwc = dw11 & 0x1;

        for (int i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;

    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_clear_ctrl(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        blk_drain(ns->blkconf.blk);
    }

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->outstanding_aers = 0;
    n->qs_created = false;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        blk_flush(ns->blkconf.blk);
    }
}
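/*
 * CC.MPS arithmetic used in nvme_start_ctrl() below (illustrative): the
 * controller page size is 2 ^ (12 + CC.MPS), so CC.MPS = 0 selects 4096-byte
 * pages and each page can then hold page_size / sizeof(uint64_t) = 512 PRP
 * entries (max_prp_ents).
 */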
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
        trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    return 0;
}
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then a read from PMRSTS should ensure
         * prior writes made it to persistent media.
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
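/*
 * Doorbell decoding used below (illustrative, with the fixed 4-byte stride
 * this device advertises): the doorbell region starts at offset 0x1000,
 * SQ y's tail doorbell lives at 0x1000 + (2 * y) * 4 and CQ y's head doorbell
 * at 0x1000 + (2 * y + 1) * 4, so bit 2 of (addr - 0x1000) selects CQ vs SQ
 * and (addr - 0x1000) >> 3 recovers the queue id.
 */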
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
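
/*
 * Doorbell layout, for reference: with a doorbell stride of 4 bytes, the
 * submission queue y tail doorbell lives at 0x1000 + (2y) * 4 and the
 * completion queue y head doorbell at 0x1000 + (2y + 1) * 4. So a write to
 * 0x1008 is the SQ 1 tail ((0x1008 - 0x1000) >> 3 == 1) and a write to
 * 0x100c is the CQ 1 head ((0x100c - 0x1004) >> 3 == 1), which is exactly
 * what the qid computations above recover.
 */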
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
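
/*
 * The .impl constraints above mean the memory core presents accesses to
 * these callbacks in 2 to 8 byte units; guest accesses outside that range
 * are split up or widened by the core before read/write are invoked.
 */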
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
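
/*
 * Unlike the main register BAR, the CMB allows byte-granular access
 * (min_access_size of 1): stn_le_p()/ldn_le_p() simply store or load "size"
 * bytes little-endian at the given offset into the cmbuf backing buffer, so
 * the guest can treat the CMB like ordinary memory.
 */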
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk) {
        warn_report("drive property is deprecated; "
                    "please use an nvme-ns device instead");
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}
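
/*
 * Deprecation mapping, for reference: the old num_queues property counted
 * the admin queue as well, so num_queues=65 is converted to max_ioqpairs=64
 * (65 - 1) above; the admin pair is added back when the register and queue
 * arrays are sized in nvme_init_state().
 */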
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = NVME_MAX_NAMESPACES;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
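
/*
 * Sizing example: with the default max_ioqpairs=64 there are 65 queue pairs
 * including the admin pair, so the doorbell area is 2 * 65 * NVME_DB_SIZE =
 * 520 bytes; reg_size is that plus the fixed register block, rounded up to
 * the next power of two by pow2ceil().
 */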
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    uint32_t nsid = nvme_nsid(ns);

    if (nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (!nsid) {
        for (int i = 1; i <= n->num_namespaces; i++) {
            if (!nvme_ns(n, i)) {
                nsid = ns->params.nsid = i;
                break;
            }
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return -1;
        }
    } else {
        if (n->namespaces[nsid - 1]) {
            error_setg(errp, "namespace id '%d' is already in use", nsid);
            return -1;
        }
    }

    trace_pci_nvme_register_namespace(nsid);

    n->namespaces[nsid - 1] = ns;

    return 0;
}
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
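
/*
 * CMBSZ example: SZU=2 selects a 1 MiB size unit, so SZ is simply
 * cmb_size_mb and a device started with cmb_size_mb=64 exposes a 64 MiB
 * controller memory buffer, allocated here as cmbuf and mapped through
 * BAR 2 (NVME_CMB_BIR).
 */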
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
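
/*
 * Setting PMRWBM to 0x02 advertises the write-barrier mechanism used by
 * nvme_mmio_read(): the guest is told that completing a read of PMRSTS
 * guarantees that its earlier writes to the persistent memory region have
 * reached the backing memory (hence the msync of the pmrdev region there).
 */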
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it
     * is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES);

    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
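
/*
 * Decoding a few of the values above: CAP.MQES=0x7ff is a zero's based
 * field, so queues may hold up to 2048 entries; CAP.TO=0xf gives a 7.5
 * second ready timeout (15 * 500 ms); CAP.MPSMAX=4 allows host page sizes up
 * to 2^(12 + 4) = 64 KiB. In the identify data, sqes/cqes advertise 64-byte
 * submission and 16-byte completion entries (2^6 and 2^4).
 */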
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(n, ns, errp)) {
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    if (s->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &s->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }
}
static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init    = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)