/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>
 *      -device nvme-ns,drive=<drive_id>,bus=bus_name,nsid=<nsid>
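 *
 * As an illustration only (the drive name, serial and device id below are
 * made-up values, not defaults from this file), a guest with one controller
 * and one namespace could be started with:
 *
 *      -drive file=nvm.img,if=none,id=nvm
 *      -device nvme,serial=deadbeef,id=nvme0
 *      -device nvme-ns,drive=nvm,bus=nvme0,nsid=1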
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
 * to be at offset 0 in BAR2 and currently supports only WDS, RDS and SQS.
 *
 * The cmb_size_mb= and pmrdev= options are mutually exclusive due to a
 * limitation in the available BARs. cmb_size_mb= takes precedence over
 * pmrdev= when both are provided.
 *
 * PMR emulation can be enabled by pointing pmrdev= at a memory-backend-file,
 * for example:
 *
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 */
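/*
 * Illustrative example only (the values below are hypothetical, not taken
 * from this file): with aerl=3 the host may keep up to four AER commands
 * outstanding (0's based), and with aer_max_queued=64 at most 64 events are
 * held back while no AER is outstanding, e.g.:
 *
 *      -device nvme,serial=deadbeef,aerl=3,aer_max_queued=64,...
 */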
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_SPEC_VER 0x00010300
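/*
 * NVME_SPEC_VER mirrors the layout of the Version (VS) register:
 * 0x00010300 encodes NVMe 1.3.0 (MJR=1, MNR=3, TER=0).
 */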
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
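/*
 * The temperature values above are in Kelvin, matching the SMART / Health
 * log Composite Temperature convention: 0x143 = 323 K (~50 C),
 * 0x157 = 343 K (~70 C), 0x175 = 373 K (~100 C).
 */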
#define NVME_NUM_FW_SLOTS 1
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;
    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}
static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
200 static void nvme_irq_check(NvmeCtrl
*n
)
202 if (msix_enabled(&(n
->parent_obj
))) {
205 if (~n
->bar
.intms
& n
->irq_status
) {
206 pci_irq_assert(&n
->parent_obj
);
208 pci_irq_deassert(&n
->parent_obj
);
212 static void nvme_irq_assert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
214 if (cq
->irq_enabled
) {
215 if (msix_enabled(&(n
->parent_obj
))) {
216 trace_pci_nvme_irq_msix(cq
->vector
);
217 msix_notify(&(n
->parent_obj
), cq
->vector
);
219 trace_pci_nvme_irq_pin();
220 assert(cq
->vector
< 32);
221 n
->irq_status
|= 1 << cq
->vector
;
225 trace_pci_nvme_irq_masked();
229 static void nvme_irq_deassert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
231 if (cq
->irq_enabled
) {
232 if (msix_enabled(&(n
->parent_obj
))) {
235 assert(cq
->vector
< 32);
236 n
->irq_status
&= ~(1 << cq
->vector
);
242 static void nvme_req_clear(NvmeRequest
*req
)
246 memset(&req
->cqe
, 0x0, sizeof(req
->cqe
));
247 req
->status
= NVME_SUCCESS
;
250 static void nvme_req_exit(NvmeRequest
*req
)
253 qemu_sglist_destroy(&req
->qsg
);
257 qemu_iovec_destroy(&req
->iov
);
261 static uint16_t nvme_map_addr_cmb(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
268 trace_pci_nvme_map_addr_cmb(addr
, len
);
270 if (!nvme_addr_is_cmb(n
, addr
) || !nvme_addr_is_cmb(n
, addr
+ len
- 1)) {
271 return NVME_DATA_TRAS_ERROR
;
274 qemu_iovec_add(iov
, nvme_addr_to_cmb(n
, addr
), len
);
279 static uint16_t nvme_map_addr(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
280 hwaddr addr
, size_t len
)
286 trace_pci_nvme_map_addr(addr
, len
);
288 if (nvme_addr_is_cmb(n
, addr
)) {
289 if (qsg
&& qsg
->sg
) {
290 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
296 qemu_iovec_init(iov
, 1);
299 return nvme_map_addr_cmb(n
, iov
, addr
, len
);
302 if (iov
&& iov
->iov
) {
303 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
309 pci_dma_sglist_init(qsg
, &n
->parent_obj
, 1);
312 qemu_sglist_add(qsg
, addr
, len
);
317 static uint16_t nvme_map_prp(NvmeCtrl
*n
, uint64_t prp1
, uint64_t prp2
,
318 uint32_t len
, NvmeRequest
*req
)
320 hwaddr trans_len
= n
->page_size
- (prp1
% n
->page_size
);
321 trans_len
= MIN(len
, trans_len
);
322 int num_prps
= (len
>> n
->page_bits
) + 1;
324 bool prp_list_in_cmb
= false;
327 QEMUSGList
*qsg
= &req
->qsg
;
328 QEMUIOVector
*iov
= &req
->iov
;
330 trace_pci_nvme_map_prp(trans_len
, len
, prp1
, prp2
, num_prps
);
332 if (nvme_addr_is_cmb(n
, prp1
)) {
333 qemu_iovec_init(iov
, num_prps
);
335 pci_dma_sglist_init(qsg
, &n
->parent_obj
, num_prps
);
338 status
= nvme_map_addr(n
, qsg
, iov
, prp1
, trans_len
);
345 if (len
> n
->page_size
) {
346 uint64_t prp_list
[n
->max_prp_ents
];
347 uint32_t nents
, prp_trans
;
350 if (nvme_addr_is_cmb(n
, prp2
)) {
351 prp_list_in_cmb
= true;
354 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
355 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
356 ret
= nvme_addr_read(n
, prp2
, (void *)prp_list
, prp_trans
);
358 trace_pci_nvme_err_addr_read(prp2
);
359 return NVME_DATA_TRAS_ERROR
;
362 uint64_t prp_ent
= le64_to_cpu(prp_list
[i
]);
364 if (i
== n
->max_prp_ents
- 1 && len
> n
->page_size
) {
365 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
366 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
367 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
370 if (prp_list_in_cmb
!= nvme_addr_is_cmb(n
, prp_ent
)) {
371 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
375 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
376 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
377 ret
= nvme_addr_read(n
, prp_ent
, (void *)prp_list
,
380 trace_pci_nvme_err_addr_read(prp_ent
);
381 return NVME_DATA_TRAS_ERROR
;
383 prp_ent
= le64_to_cpu(prp_list
[i
]);
386 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
387 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
388 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
391 trans_len
= MIN(len
, n
->page_size
);
392 status
= nvme_map_addr(n
, qsg
, iov
, prp_ent
, trans_len
);
401 if (unlikely(prp2
& (n
->page_size
- 1))) {
402 trace_pci_nvme_err_invalid_prp2_align(prp2
);
403 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
405 status
= nvme_map_addr(n
, qsg
, iov
, prp2
, len
);
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped from *len.
419 static uint16_t nvme_map_sgl_data(NvmeCtrl
*n
, QEMUSGList
*qsg
,
421 NvmeSglDescriptor
*segment
, uint64_t nsgld
,
422 size_t *len
, NvmeRequest
*req
)
424 dma_addr_t addr
, trans_len
;
428 for (int i
= 0; i
< nsgld
; i
++) {
429 uint8_t type
= NVME_SGL_TYPE(segment
[i
].type
);
432 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
433 if (req
->cmd
.opcode
== NVME_CMD_WRITE
) {
436 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
438 case NVME_SGL_DESCR_TYPE_SEGMENT
:
439 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
440 return NVME_INVALID_NUM_SGL_DESCRS
| NVME_DNR
;
442 return NVME_SGL_DESCR_TYPE_INVALID
| NVME_DNR
;
445 dlen
= le32_to_cpu(segment
[i
].len
);
453 * All data has been mapped, but the SGL contains additional
454 * segments and/or descriptors. The controller might accept
455 * ignoring the rest of the SGL.
457 uint32_t sgls
= le32_to_cpu(n
->id_ctrl
.sgls
);
458 if (sgls
& NVME_CTRL_SGLS_EXCESS_LENGTH
) {
462 trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req
));
463 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
466 trans_len
= MIN(*len
, dlen
);
468 if (type
== NVME_SGL_DESCR_TYPE_BIT_BUCKET
) {
472 addr
= le64_to_cpu(segment
[i
].addr
);
474 if (UINT64_MAX
- addr
< dlen
) {
475 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
478 status
= nvme_map_addr(n
, qsg
, iov
, addr
, trans_len
);
490 static uint16_t nvme_map_sgl(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
491 NvmeSglDescriptor sgl
, size_t len
,
495 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
496 * dynamically allocating a potentially huge SGL. The spec allows the SGL
497 * to be larger (as in number of bytes required to describe the SGL
498 * descriptors and segment chain) than the command transfer size, so it is
499 * not bounded by MDTS.
501 const int SEG_CHUNK_SIZE
= 256;
503 NvmeSglDescriptor segment
[SEG_CHUNK_SIZE
], *sgld
, *last_sgld
;
507 bool sgl_in_cmb
= false;
512 addr
= le64_to_cpu(sgl
.addr
);
514 trace_pci_nvme_map_sgl(nvme_cid(req
), NVME_SGL_TYPE(sgl
.type
), len
);
517 * If the entire transfer can be described with a single data block it can
518 * be mapped directly.
520 if (NVME_SGL_TYPE(sgl
.type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
521 status
= nvme_map_sgl_data(n
, qsg
, iov
, sgld
, 1, &len
, req
);
530 * If the segment is located in the CMB, the submission queue of the
531 * request must also reside there.
533 if (nvme_addr_is_cmb(n
, addr
)) {
534 if (!nvme_addr_is_cmb(n
, req
->sq
->dma_addr
)) {
535 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
542 switch (NVME_SGL_TYPE(sgld
->type
)) {
543 case NVME_SGL_DESCR_TYPE_SEGMENT
:
544 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
547 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
550 seg_len
= le32_to_cpu(sgld
->len
);
552 /* check the length of the (Last) Segment descriptor */
553 if ((!seg_len
|| seg_len
& 0xf) &&
554 (NVME_SGL_TYPE(sgld
->type
) != NVME_SGL_DESCR_TYPE_BIT_BUCKET
)) {
555 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
558 if (UINT64_MAX
- addr
< seg_len
) {
559 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
562 nsgld
= seg_len
/ sizeof(NvmeSglDescriptor
);
564 while (nsgld
> SEG_CHUNK_SIZE
) {
565 if (nvme_addr_read(n
, addr
, segment
, sizeof(segment
))) {
566 trace_pci_nvme_err_addr_read(addr
);
567 status
= NVME_DATA_TRAS_ERROR
;
571 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, SEG_CHUNK_SIZE
,
577 nsgld
-= SEG_CHUNK_SIZE
;
578 addr
+= SEG_CHUNK_SIZE
* sizeof(NvmeSglDescriptor
);
581 ret
= nvme_addr_read(n
, addr
, segment
, nsgld
*
582 sizeof(NvmeSglDescriptor
));
584 trace_pci_nvme_err_addr_read(addr
);
585 status
= NVME_DATA_TRAS_ERROR
;
589 last_sgld
= &segment
[nsgld
- 1];
592 * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
595 switch (NVME_SGL_TYPE(last_sgld
->type
)) {
596 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
597 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
598 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
, &len
, req
);
610 * If the last descriptor was not a Data Block or Bit Bucket, then the
611 * current segment must not be a Last Segment.
613 if (NVME_SGL_TYPE(sgld
->type
) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT
) {
614 status
= NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
619 addr
= le64_to_cpu(sgld
->addr
);
622 * Do not map the last descriptor; it will be a Segment or Last Segment
623 * descriptor and is handled by the next iteration.
625 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
- 1, &len
, req
);
631 * If the next segment is in the CMB, make sure that the sgl was
632 * already located there.
634 if (sgl_in_cmb
!= nvme_addr_is_cmb(n
, addr
)) {
635 status
= NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
641 /* if there is any residual left in len, the SGL was too short */
643 status
= NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
651 qemu_iovec_destroy(iov
);
655 qemu_sglist_destroy(qsg
);
661 static uint16_t nvme_map_dptr(NvmeCtrl
*n
, size_t len
, NvmeRequest
*req
)
665 switch (NVME_CMD_FLAGS_PSDT(req
->cmd
.flags
)) {
667 prp1
= le64_to_cpu(req
->cmd
.dptr
.prp1
);
668 prp2
= le64_to_cpu(req
->cmd
.dptr
.prp2
);
670 return nvme_map_prp(n
, prp1
, prp2
, len
, req
);
671 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
672 case NVME_PSDT_SGL_MPTR_SGL
:
673 /* SGLs shall not be used for Admin commands in NVMe over PCIe */
674 if (!req
->sq
->sqid
) {
675 return NVME_INVALID_FIELD
| NVME_DNR
;
678 return nvme_map_sgl(n
, &req
->qsg
, &req
->iov
, req
->cmd
.dptr
.sgl
, len
,
681 return NVME_INVALID_FIELD
;
685 static uint16_t nvme_dma(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
686 DMADirection dir
, NvmeRequest
*req
)
688 uint16_t status
= NVME_SUCCESS
;
690 status
= nvme_map_dptr(n
, len
, req
);
695 /* assert that only one of qsg and iov carries data */
696 assert((req
->qsg
.nsg
> 0) != (req
->iov
.niov
> 0));
698 if (req
->qsg
.nsg
> 0) {
701 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
702 residual
= dma_buf_write(ptr
, len
, &req
->qsg
);
704 residual
= dma_buf_read(ptr
, len
, &req
->qsg
);
707 if (unlikely(residual
)) {
708 trace_pci_nvme_err_invalid_dma();
709 status
= NVME_INVALID_FIELD
| NVME_DNR
;
714 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
715 bytes
= qemu_iovec_to_buf(&req
->iov
, 0, ptr
, len
);
717 bytes
= qemu_iovec_from_buf(&req
->iov
, 0, ptr
, len
);
720 if (unlikely(bytes
!= len
)) {
721 trace_pci_nvme_err_invalid_dma();
722 status
= NVME_INVALID_FIELD
| NVME_DNR
;
729 static void nvme_post_cqes(void *opaque
)
731 NvmeCQueue
*cq
= opaque
;
732 NvmeCtrl
*n
= cq
->ctrl
;
733 NvmeRequest
*req
, *next
;
736 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
740 if (nvme_cq_full(cq
)) {
745 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
746 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
747 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
748 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
749 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
752 trace_pci_nvme_err_addr_write(addr
);
753 trace_pci_nvme_err_cfs();
754 n
->bar
.csts
= NVME_CSTS_FAILED
;
757 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
758 nvme_inc_cq_tail(cq
);
760 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
762 if (cq
->tail
!= cq
->head
) {
763 nvme_irq_assert(n
, cq
);
767 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
769 assert(cq
->cqid
== req
->sq
->cqid
);
770 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
774 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
775 req
->status
, req
->cmd
.opcode
);
778 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
779 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
780 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
783 static void nvme_process_aers(void *opaque
)
785 NvmeCtrl
*n
= opaque
;
786 NvmeAsyncEvent
*event
, *next
;
788 trace_pci_nvme_process_aers(n
->aer_queued
);
790 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
792 NvmeAerResult
*result
;
794 /* can't post cqe if there is nothing to complete */
795 if (!n
->outstanding_aers
) {
796 trace_pci_nvme_no_outstanding_aers();
800 /* ignore if masked (cqe posted, but event not cleared) */
801 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
802 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
806 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
809 n
->aer_mask
|= 1 << event
->result
.event_type
;
810 n
->outstanding_aers
--;
812 req
= n
->aer_reqs
[n
->outstanding_aers
];
814 result
= (NvmeAerResult
*) &req
->cqe
.result
;
815 result
->event_type
= event
->result
.event_type
;
816 result
->event_info
= event
->result
.event_info
;
817 result
->log_page
= event
->result
.log_page
;
820 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
823 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
827 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
828 uint8_t event_info
, uint8_t log_page
)
830 NvmeAsyncEvent
*event
;
832 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
834 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
835 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
839 event
= g_new(NvmeAsyncEvent
, 1);
840 event
->result
= (NvmeAerResult
) {
841 .event_type
= event_type
,
842 .event_info
= event_info
,
843 .log_page
= log_page
,
846 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
849 nvme_process_aers(n
);
852 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
854 n
->aer_mask
&= ~(1 << event_type
);
855 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
856 nvme_process_aers(n
);
860 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
862 uint8_t mdts
= n
->params
.mdts
;
864 if (mdts
&& len
> n
->page_size
<< mdts
) {
865 return NVME_INVALID_FIELD
| NVME_DNR
;
871 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
874 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
876 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
877 return NVME_LBA_RANGE
| NVME_DNR
;
883 static uint16_t nvme_check_dulbe(NvmeNamespace
*ns
, uint64_t slba
,
886 BlockDriverState
*bs
= blk_bs(ns
->blkconf
.blk
);
888 int64_t pnum
= 0, bytes
= nvme_l2b(ns
, nlb
);
889 int64_t offset
= nvme_l2b(ns
, slba
);
893 Error
*local_err
= NULL
;
     * `pnum` holds the number of bytes after offset that share the same
     * allocation status as the byte at offset. If `pnum` is different from
     * `bytes`, we should check the allocation status of the next range and
     * continue this until all bytes have been checked.
904 ret
= bdrv_block_status(bs
, offset
, bytes
, &pnum
, NULL
, NULL
);
906 error_setg_errno(&local_err
, -ret
, "unable to get block status");
907 error_report_err(local_err
);
909 return NVME_INTERNAL_DEV_ERROR
;
912 zeroed
= !!(ret
& BDRV_BLOCK_ZERO
);
914 trace_pci_nvme_block_status(offset
, bytes
, pnum
, ret
, zeroed
);
921 } while (pnum
!= bytes
);
926 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
928 uint16_t status
= NVME_SUCCESS
;
929 Error
*local_err
= NULL
;
931 switch (req
->cmd
.opcode
) {
933 status
= NVME_UNRECOVERED_READ
;
937 case NVME_CMD_WRITE_ZEROES
:
938 status
= NVME_WRITE_FAULT
;
941 status
= NVME_INTERNAL_DEV_ERROR
;
945 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(ret
), status
);
947 error_setg_errno(&local_err
, -ret
, "aio failed");
948 error_report_err(local_err
);
951 * Set the command status code to the first encountered error but allow a
952 * subsequent Internal Device Error to trump it.
954 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
958 req
->status
= status
;
961 static void nvme_rw_cb(void *opaque
, int ret
)
963 NvmeRequest
*req
= opaque
;
964 NvmeNamespace
*ns
= req
->ns
;
966 BlockBackend
*blk
= ns
->blkconf
.blk
;
967 BlockAcctCookie
*acct
= &req
->acct
;
968 BlockAcctStats
*stats
= blk_get_stats(blk
);
970 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
973 block_acct_done(stats
, acct
);
975 block_acct_failed(stats
, acct
);
976 nvme_aio_err(req
, ret
);
979 nvme_enqueue_req_completion(nvme_cq(req
), req
);
982 static void nvme_aio_discard_cb(void *opaque
, int ret
)
984 NvmeRequest
*req
= opaque
;
985 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
987 trace_pci_nvme_aio_discard_cb(nvme_cid(req
));
990 nvme_aio_err(req
, ret
);
999 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1002 struct nvme_compare_ctx
{
1008 static void nvme_compare_cb(void *opaque
, int ret
)
1010 NvmeRequest
*req
= opaque
;
1011 NvmeNamespace
*ns
= req
->ns
;
1012 struct nvme_compare_ctx
*ctx
= req
->opaque
;
1013 g_autofree
uint8_t *buf
= NULL
;
1016 trace_pci_nvme_compare_cb(nvme_cid(req
));
1019 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1021 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1022 nvme_aio_err(req
, ret
);
1026 buf
= g_malloc(ctx
->len
);
1028 status
= nvme_dma(nvme_ctrl(req
), buf
, ctx
->len
, DMA_DIRECTION_TO_DEVICE
,
1031 req
->status
= status
;
1035 if (memcmp(buf
, ctx
->bounce
, ctx
->len
)) {
1036 req
->status
= NVME_CMP_FAILURE
;
1040 qemu_iovec_destroy(&ctx
->iov
);
1041 g_free(ctx
->bounce
);
1044 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1047 static uint16_t nvme_dsm(NvmeCtrl
*n
, NvmeRequest
*req
)
1049 NvmeNamespace
*ns
= req
->ns
;
1050 NvmeDsmCmd
*dsm
= (NvmeDsmCmd
*) &req
->cmd
;
1052 uint32_t attr
= le32_to_cpu(dsm
->attributes
);
1053 uint32_t nr
= (le32_to_cpu(dsm
->nr
) & 0xff) + 1;
1055 uint16_t status
= NVME_SUCCESS
;
1057 trace_pci_nvme_dsm(nvme_cid(req
), nvme_nsid(ns
), nr
, attr
);
1059 if (attr
& NVME_DSMGMT_AD
) {
1062 NvmeDsmRange range
[nr
];
1063 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1065 status
= nvme_dma(n
, (uint8_t *)range
, sizeof(range
),
1066 DMA_DIRECTION_TO_DEVICE
, req
);
         * AIO callbacks may be called immediately, so initialize discards to 1
         * to make sure the callback does not complete the request before all
         * discards have been issued.
1078 for (int i
= 0; i
< nr
; i
++) {
1079 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1080 uint32_t nlb
= le32_to_cpu(range
[i
].nlb
);
1082 if (nvme_check_bounds(ns
, slba
, nlb
)) {
1083 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
1088 trace_pci_nvme_dsm_deallocate(nvme_cid(req
), nvme_nsid(ns
), slba
,
1091 offset
= nvme_l2b(ns
, slba
);
1092 len
= nvme_l2b(ns
, nlb
);
1095 size_t bytes
= MIN(BDRV_REQUEST_MAX_BYTES
, len
);
1099 blk_aio_pdiscard(ns
->blkconf
.blk
, offset
, bytes
,
1100 nvme_aio_discard_cb
, req
);
1107 /* account for the 1-initialization */
1111 status
= NVME_NO_COMPLETE
;
1113 status
= req
->status
;
1120 static uint16_t nvme_compare(NvmeCtrl
*n
, NvmeRequest
*req
)
1122 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1123 NvmeNamespace
*ns
= req
->ns
;
1124 BlockBackend
*blk
= ns
->blkconf
.blk
;
1125 uint64_t slba
= le64_to_cpu(rw
->slba
);
1126 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
1127 size_t len
= nvme_l2b(ns
, nlb
);
1128 int64_t offset
= nvme_l2b(ns
, slba
);
1129 uint8_t *bounce
= NULL
;
1130 struct nvme_compare_ctx
*ctx
= NULL
;
1133 trace_pci_nvme_compare(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1135 status
= nvme_check_mdts(n
, len
);
1137 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1141 status
= nvme_check_bounds(ns
, slba
, nlb
);
1143 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1147 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1148 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1154 bounce
= g_malloc(len
);
1156 ctx
= g_new(struct nvme_compare_ctx
, 1);
1157 ctx
->bounce
= bounce
;
1162 qemu_iovec_init(&ctx
->iov
, 1);
1163 qemu_iovec_add(&ctx
->iov
, bounce
, len
);
1165 block_acct_start(blk_get_stats(blk
), &req
->acct
, len
, BLOCK_ACCT_READ
);
1166 blk_aio_preadv(blk
, offset
, &ctx
->iov
, 0, nvme_compare_cb
, req
);
1168 return NVME_NO_COMPLETE
;
1171 static uint16_t nvme_flush(NvmeCtrl
*n
, NvmeRequest
*req
)
1173 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
1175 req
->aiocb
= blk_aio_flush(req
->ns
->blkconf
.blk
, nvme_rw_cb
, req
);
1176 return NVME_NO_COMPLETE
;
1179 static uint16_t nvme_write_zeroes(NvmeCtrl
*n
, NvmeRequest
*req
)
1181 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1182 NvmeNamespace
*ns
= req
->ns
;
1183 uint64_t slba
= le64_to_cpu(rw
->slba
);
1184 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1185 uint64_t offset
= nvme_l2b(ns
, slba
);
1186 uint32_t count
= nvme_l2b(ns
, nlb
);
1189 trace_pci_nvme_write_zeroes(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1191 status
= nvme_check_bounds(ns
, slba
, nlb
);
1193 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1197 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
1199 req
->aiocb
= blk_aio_pwrite_zeroes(req
->ns
->blkconf
.blk
, offset
, count
,
1200 BDRV_REQ_MAY_UNMAP
, nvme_rw_cb
, req
);
1201 return NVME_NO_COMPLETE
;
1204 static uint16_t nvme_rw(NvmeCtrl
*n
, NvmeRequest
*req
)
1206 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1207 NvmeNamespace
*ns
= req
->ns
;
1208 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1209 uint64_t slba
= le64_to_cpu(rw
->slba
);
1211 uint64_t data_size
= nvme_l2b(ns
, nlb
);
1212 uint64_t data_offset
= nvme_l2b(ns
, slba
);
1213 enum BlockAcctType acct
= req
->cmd
.opcode
== NVME_CMD_WRITE
?
1214 BLOCK_ACCT_WRITE
: BLOCK_ACCT_READ
;
1215 BlockBackend
*blk
= ns
->blkconf
.blk
;
1218 trace_pci_nvme_rw(nvme_cid(req
), nvme_io_opc_str(rw
->opcode
),
1219 nvme_nsid(ns
), nlb
, data_size
, slba
);
1221 status
= nvme_check_mdts(n
, data_size
);
1223 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
1227 status
= nvme_check_bounds(ns
, slba
, nlb
);
1229 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1233 if (acct
== BLOCK_ACCT_READ
) {
1234 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1235 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1242 status
= nvme_map_dptr(n
, data_size
, req
);
1247 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
, acct
);
1249 if (acct
== BLOCK_ACCT_WRITE
) {
1250 req
->aiocb
= dma_blk_write(blk
, &req
->qsg
, data_offset
,
1251 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1253 req
->aiocb
= dma_blk_read(blk
, &req
->qsg
, data_offset
,
1254 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1257 if (acct
== BLOCK_ACCT_WRITE
) {
1258 req
->aiocb
= blk_aio_pwritev(blk
, data_offset
, &req
->iov
, 0,
1261 req
->aiocb
= blk_aio_preadv(blk
, data_offset
, &req
->iov
, 0,
1265 return NVME_NO_COMPLETE
;
1268 block_acct_invalid(blk_get_stats(ns
->blkconf
.blk
), acct
);
1272 static uint16_t nvme_io_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
1274 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
1276 trace_pci_nvme_io_cmd(nvme_cid(req
), nsid
, nvme_sqid(req
),
1277 req
->cmd
.opcode
, nvme_io_opc_str(req
->cmd
.opcode
));
1279 if (NVME_CC_CSS(n
->bar
.cc
) == NVME_CC_CSS_ADMIN_ONLY
) {
1280 return NVME_INVALID_OPCODE
| NVME_DNR
;
1283 if (!nvme_nsid_valid(n
, nsid
)) {
1284 return NVME_INVALID_NSID
| NVME_DNR
;
1287 req
->ns
= nvme_ns(n
, nsid
);
1288 if (unlikely(!req
->ns
)) {
1289 return NVME_INVALID_FIELD
| NVME_DNR
;
1292 switch (req
->cmd
.opcode
) {
1293 case NVME_CMD_FLUSH
:
1294 return nvme_flush(n
, req
);
1295 case NVME_CMD_WRITE_ZEROES
:
1296 return nvme_write_zeroes(n
, req
);
1297 case NVME_CMD_WRITE
:
1299 return nvme_rw(n
, req
);
1300 case NVME_CMD_COMPARE
:
1301 return nvme_compare(n
, req
);
1303 return nvme_dsm(n
, req
);
1305 trace_pci_nvme_err_invalid_opc(req
->cmd
.opcode
);
1306 return NVME_INVALID_OPCODE
| NVME_DNR
;
1310 static void nvme_free_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
)
1312 n
->sq
[sq
->sqid
] = NULL
;
1313 timer_free(sq
->timer
);
1320 static uint16_t nvme_del_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
1322 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1323 NvmeRequest
*r
, *next
;
1326 uint16_t qid
= le16_to_cpu(c
->qid
);
1328 if (unlikely(!qid
|| nvme_check_sqid(n
, qid
))) {
1329 trace_pci_nvme_err_invalid_del_sq(qid
);
1330 return NVME_INVALID_QID
| NVME_DNR
;
1333 trace_pci_nvme_del_sq(qid
);
1336 while (!QTAILQ_EMPTY(&sq
->out_req_list
)) {
1337 r
= QTAILQ_FIRST(&sq
->out_req_list
);
1339 blk_aio_cancel(r
->aiocb
);
1341 if (!nvme_check_cqid(n
, sq
->cqid
)) {
1342 cq
= n
->cq
[sq
->cqid
];
1343 QTAILQ_REMOVE(&cq
->sq_list
, sq
, entry
);
1346 QTAILQ_FOREACH_SAFE(r
, &cq
->req_list
, entry
, next
) {
1348 QTAILQ_REMOVE(&cq
->req_list
, r
, entry
);
1349 QTAILQ_INSERT_TAIL(&sq
->req_list
, r
, entry
);
1354 nvme_free_sq(sq
, n
);
1355 return NVME_SUCCESS
;
1358 static void nvme_init_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
, uint64_t dma_addr
,
1359 uint16_t sqid
, uint16_t cqid
, uint16_t size
)
1365 sq
->dma_addr
= dma_addr
;
1369 sq
->head
= sq
->tail
= 0;
1370 sq
->io_req
= g_new0(NvmeRequest
, sq
->size
);
1372 QTAILQ_INIT(&sq
->req_list
);
1373 QTAILQ_INIT(&sq
->out_req_list
);
1374 for (i
= 0; i
< sq
->size
; i
++) {
1375 sq
->io_req
[i
].sq
= sq
;
1376 QTAILQ_INSERT_TAIL(&(sq
->req_list
), &sq
->io_req
[i
], entry
);
1378 sq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_process_sq
, sq
);
1380 assert(n
->cq
[cqid
]);
1382 QTAILQ_INSERT_TAIL(&(cq
->sq_list
), sq
, entry
);
1386 static uint16_t nvme_create_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
1389 NvmeCreateSq
*c
= (NvmeCreateSq
*)&req
->cmd
;
1391 uint16_t cqid
= le16_to_cpu(c
->cqid
);
1392 uint16_t sqid
= le16_to_cpu(c
->sqid
);
1393 uint16_t qsize
= le16_to_cpu(c
->qsize
);
1394 uint16_t qflags
= le16_to_cpu(c
->sq_flags
);
1395 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1397 trace_pci_nvme_create_sq(prp1
, sqid
, cqid
, qsize
, qflags
);
1399 if (unlikely(!cqid
|| nvme_check_cqid(n
, cqid
))) {
1400 trace_pci_nvme_err_invalid_create_sq_cqid(cqid
);
1401 return NVME_INVALID_CQID
| NVME_DNR
;
1403 if (unlikely(!sqid
|| sqid
> n
->params
.max_ioqpairs
||
1404 n
->sq
[sqid
] != NULL
)) {
1405 trace_pci_nvme_err_invalid_create_sq_sqid(sqid
);
1406 return NVME_INVALID_QID
| NVME_DNR
;
1408 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
1409 trace_pci_nvme_err_invalid_create_sq_size(qsize
);
1410 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
1412 if (unlikely(prp1
& (n
->page_size
- 1))) {
1413 trace_pci_nvme_err_invalid_create_sq_addr(prp1
);
1414 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
1416 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags
)))) {
1417 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags
));
1418 return NVME_INVALID_FIELD
| NVME_DNR
;
1420 sq
= g_malloc0(sizeof(*sq
));
1421 nvme_init_sq(sq
, n
, prp1
, sqid
, cqid
, qsize
+ 1);
1422 return NVME_SUCCESS
;
1426 uint64_t units_read
;
1427 uint64_t units_written
;
1428 uint64_t read_commands
;
1429 uint64_t write_commands
;
1432 static void nvme_set_blk_stats(NvmeNamespace
*ns
, struct nvme_stats
*stats
)
1434 BlockAcctStats
*s
= blk_get_stats(ns
->blkconf
.blk
);
1436 stats
->units_read
+= s
->nr_bytes
[BLOCK_ACCT_READ
] >> BDRV_SECTOR_BITS
;
1437 stats
->units_written
+= s
->nr_bytes
[BLOCK_ACCT_WRITE
] >> BDRV_SECTOR_BITS
;
1438 stats
->read_commands
+= s
->nr_ops
[BLOCK_ACCT_READ
];
1439 stats
->write_commands
+= s
->nr_ops
[BLOCK_ACCT_WRITE
];
1442 static uint16_t nvme_smart_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
1443 uint64_t off
, NvmeRequest
*req
)
1445 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
1446 struct nvme_stats stats
= { 0 };
1447 NvmeSmartLog smart
= { 0 };
1452 if (off
>= sizeof(smart
)) {
1453 return NVME_INVALID_FIELD
| NVME_DNR
;
1456 if (nsid
!= 0xffffffff) {
1457 ns
= nvme_ns(n
, nsid
);
1459 return NVME_INVALID_NSID
| NVME_DNR
;
1461 nvme_set_blk_stats(ns
, &stats
);
1465 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
1470 nvme_set_blk_stats(ns
, &stats
);
1474 trans_len
= MIN(sizeof(smart
) - off
, buf_len
);
1476 smart
.data_units_read
[0] = cpu_to_le64(DIV_ROUND_UP(stats
.units_read
,
1478 smart
.data_units_written
[0] = cpu_to_le64(DIV_ROUND_UP(stats
.units_written
,
1480 smart
.host_read_commands
[0] = cpu_to_le64(stats
.read_commands
);
1481 smart
.host_write_commands
[0] = cpu_to_le64(stats
.write_commands
);
1483 smart
.temperature
= cpu_to_le16(n
->temperature
);
1485 if ((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
1486 (n
->temperature
<= n
->features
.temp_thresh_low
)) {
1487 smart
.critical_warning
|= NVME_SMART_TEMPERATURE
;
1490 current_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1491 smart
.power_on_hours
[0] =
1492 cpu_to_le64((((current_ms
- n
->starttime_ms
) / 1000) / 60) / 60);
1495 nvme_clear_events(n
, NVME_AER_TYPE_SMART
);
1498 return nvme_dma(n
, (uint8_t *) &smart
+ off
, trans_len
,
1499 DMA_DIRECTION_FROM_DEVICE
, req
);
1502 static uint16_t nvme_fw_log_info(NvmeCtrl
*n
, uint32_t buf_len
, uint64_t off
,
1506 NvmeFwSlotInfoLog fw_log
= {
1510 if (off
>= sizeof(fw_log
)) {
1511 return NVME_INVALID_FIELD
| NVME_DNR
;
1514 strpadcpy((char *)&fw_log
.frs1
, sizeof(fw_log
.frs1
), "1.0", ' ');
1515 trans_len
= MIN(sizeof(fw_log
) - off
, buf_len
);
1517 return nvme_dma(n
, (uint8_t *) &fw_log
+ off
, trans_len
,
1518 DMA_DIRECTION_FROM_DEVICE
, req
);
1521 static uint16_t nvme_error_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
1522 uint64_t off
, NvmeRequest
*req
)
1525 NvmeErrorLog errlog
;
1527 if (off
>= sizeof(errlog
)) {
1528 return NVME_INVALID_FIELD
| NVME_DNR
;
1532 nvme_clear_events(n
, NVME_AER_TYPE_ERROR
);
1535 memset(&errlog
, 0x0, sizeof(errlog
));
1536 trans_len
= MIN(sizeof(errlog
) - off
, buf_len
);
1538 return nvme_dma(n
, (uint8_t *)&errlog
, trans_len
,
1539 DMA_DIRECTION_FROM_DEVICE
, req
);
1542 static uint16_t nvme_get_log(NvmeCtrl
*n
, NvmeRequest
*req
)
1544 NvmeCmd
*cmd
= &req
->cmd
;
1546 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1547 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1548 uint32_t dw12
= le32_to_cpu(cmd
->cdw12
);
1549 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
1550 uint8_t lid
= dw10
& 0xff;
1551 uint8_t lsp
= (dw10
>> 8) & 0xf;
1552 uint8_t rae
= (dw10
>> 15) & 0x1;
1553 uint32_t numdl
, numdu
;
1554 uint64_t off
, lpol
, lpou
;
1558 numdl
= (dw10
>> 16);
1559 numdu
= (dw11
& 0xffff);
1563 len
= (((numdu
<< 16) | numdl
) + 1) << 2;
1564 off
= (lpou
<< 32ULL) | lpol
;
1567 return NVME_INVALID_FIELD
| NVME_DNR
;
1570 trace_pci_nvme_get_log(nvme_cid(req
), lid
, lsp
, rae
, len
, off
);
1572 status
= nvme_check_mdts(n
, len
);
1574 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1579 case NVME_LOG_ERROR_INFO
:
1580 return nvme_error_info(n
, rae
, len
, off
, req
);
1581 case NVME_LOG_SMART_INFO
:
1582 return nvme_smart_info(n
, rae
, len
, off
, req
);
1583 case NVME_LOG_FW_SLOT_INFO
:
1584 return nvme_fw_log_info(n
, len
, off
, req
);
1586 trace_pci_nvme_err_invalid_log_page(nvme_cid(req
), lid
);
1587 return NVME_INVALID_FIELD
| NVME_DNR
;
1591 static void nvme_free_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
)
1593 n
->cq
[cq
->cqid
] = NULL
;
1594 timer_free(cq
->timer
);
1595 msix_vector_unuse(&n
->parent_obj
, cq
->vector
);
1601 static uint16_t nvme_del_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1603 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1605 uint16_t qid
= le16_to_cpu(c
->qid
);
1607 if (unlikely(!qid
|| nvme_check_cqid(n
, qid
))) {
1608 trace_pci_nvme_err_invalid_del_cq_cqid(qid
);
1609 return NVME_INVALID_CQID
| NVME_DNR
;
1613 if (unlikely(!QTAILQ_EMPTY(&cq
->sq_list
))) {
1614 trace_pci_nvme_err_invalid_del_cq_notempty(qid
);
1615 return NVME_INVALID_QUEUE_DEL
;
1617 nvme_irq_deassert(n
, cq
);
1618 trace_pci_nvme_del_cq(qid
);
1619 nvme_free_cq(cq
, n
);
1620 return NVME_SUCCESS
;
1623 static void nvme_init_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
, uint64_t dma_addr
,
1624 uint16_t cqid
, uint16_t vector
, uint16_t size
,
1625 uint16_t irq_enabled
)
1629 ret
= msix_vector_use(&n
->parent_obj
, vector
);
1634 cq
->dma_addr
= dma_addr
;
1636 cq
->irq_enabled
= irq_enabled
;
1637 cq
->vector
= vector
;
1638 cq
->head
= cq
->tail
= 0;
1639 QTAILQ_INIT(&cq
->req_list
);
1640 QTAILQ_INIT(&cq
->sq_list
);
1642 cq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_post_cqes
, cq
);
1645 static uint16_t nvme_create_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1648 NvmeCreateCq
*c
= (NvmeCreateCq
*)&req
->cmd
;
1649 uint16_t cqid
= le16_to_cpu(c
->cqid
);
1650 uint16_t vector
= le16_to_cpu(c
->irq_vector
);
1651 uint16_t qsize
= le16_to_cpu(c
->qsize
);
1652 uint16_t qflags
= le16_to_cpu(c
->cq_flags
);
1653 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1655 trace_pci_nvme_create_cq(prp1
, cqid
, vector
, qsize
, qflags
,
1656 NVME_CQ_FLAGS_IEN(qflags
) != 0);
1658 if (unlikely(!cqid
|| cqid
> n
->params
.max_ioqpairs
||
1659 n
->cq
[cqid
] != NULL
)) {
1660 trace_pci_nvme_err_invalid_create_cq_cqid(cqid
);
1661 return NVME_INVALID_QID
| NVME_DNR
;
1663 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
1664 trace_pci_nvme_err_invalid_create_cq_size(qsize
);
1665 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
1667 if (unlikely(prp1
& (n
->page_size
- 1))) {
1668 trace_pci_nvme_err_invalid_create_cq_addr(prp1
);
1669 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
1671 if (unlikely(!msix_enabled(&n
->parent_obj
) && vector
)) {
1672 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1673 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1675 if (unlikely(vector
>= n
->params
.msix_qsize
)) {
1676 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1677 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1679 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags
)))) {
1680 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags
));
1681 return NVME_INVALID_FIELD
| NVME_DNR
;
1684 cq
= g_malloc0(sizeof(*cq
));
1685 nvme_init_cq(cq
, n
, prp1
, cqid
, vector
, qsize
+ 1,
1686 NVME_CQ_FLAGS_IEN(qflags
));
1689 * It is only required to set qs_created when creating a completion queue;
1690 * creating a submission queue without a matching completion queue will
1693 n
->qs_created
= true;
1694 return NVME_SUCCESS
;
1697 static uint16_t nvme_identify_ctrl(NvmeCtrl
*n
, NvmeRequest
*req
)
1699 trace_pci_nvme_identify_ctrl();
1701 return nvme_dma(n
, (uint8_t *)&n
->id_ctrl
, sizeof(n
->id_ctrl
),
1702 DMA_DIRECTION_FROM_DEVICE
, req
);
1705 static uint16_t nvme_identify_ns(NvmeCtrl
*n
, NvmeRequest
*req
)
1708 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1709 NvmeIdNs
*id_ns
, inactive
= { 0 };
1710 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1712 trace_pci_nvme_identify_ns(nsid
);
1714 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1715 return NVME_INVALID_NSID
| NVME_DNR
;
1718 ns
= nvme_ns(n
, nsid
);
1719 if (unlikely(!ns
)) {
1725 return nvme_dma(n
, (uint8_t *)id_ns
, sizeof(NvmeIdNs
),
1726 DMA_DIRECTION_FROM_DEVICE
, req
);
1729 static uint16_t nvme_identify_nslist(NvmeCtrl
*n
, NvmeRequest
*req
)
1731 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1732 static const int data_len
= NVME_IDENTIFY_DATA_SIZE
;
1733 uint32_t min_nsid
= le32_to_cpu(c
->nsid
);
1738 trace_pci_nvme_identify_nslist(min_nsid
);
1741 * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
1742 * since the Active Namespace ID List should return namespaces with ids
1743 * *higher* than the NSID specified in the command. This is also specified
1744 * in the spec (NVM Express v1.3d, Section 5.15.4).
1746 if (min_nsid
>= NVME_NSID_BROADCAST
- 1) {
1747 return NVME_INVALID_NSID
| NVME_DNR
;
1750 list
= g_malloc0(data_len
);
1751 for (int i
= 1; i
<= n
->num_namespaces
; i
++) {
1752 if (i
<= min_nsid
|| !nvme_ns(n
, i
)) {
1755 list
[j
++] = cpu_to_le32(i
);
1756 if (j
== data_len
/ sizeof(uint32_t)) {
1760 ret
= nvme_dma(n
, (uint8_t *)list
, data_len
, DMA_DIRECTION_FROM_DEVICE
,
1766 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl
*n
, NvmeRequest
*req
)
1769 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1770 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1771 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
];
1780 struct data
*ns_descrs
= (struct data
*)list
;
1782 trace_pci_nvme_identify_ns_descr_list(nsid
);
1784 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1785 return NVME_INVALID_NSID
| NVME_DNR
;
1788 ns
= nvme_ns(n
, nsid
);
1789 if (unlikely(!ns
)) {
1790 return NVME_INVALID_FIELD
| NVME_DNR
;
1793 memset(list
, 0x0, sizeof(list
));
1796 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
1797 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
1798 * Namespace Identification Descriptor. Add the namespace UUID here.
1800 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
1801 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDT_UUID_LEN
;
1802 memcpy(&ns_descrs
->uuid
.v
, ns
->params
.uuid
.data
, NVME_NIDT_UUID_LEN
);
1804 return nvme_dma(n
, list
, NVME_IDENTIFY_DATA_SIZE
,
1805 DMA_DIRECTION_FROM_DEVICE
, req
);
1808 static uint16_t nvme_identify(NvmeCtrl
*n
, NvmeRequest
*req
)
1810 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1812 switch (le32_to_cpu(c
->cns
)) {
1813 case NVME_ID_CNS_NS
:
1814 return nvme_identify_ns(n
, req
);
1815 case NVME_ID_CNS_CTRL
:
1816 return nvme_identify_ctrl(n
, req
);
1817 case NVME_ID_CNS_NS_ACTIVE_LIST
:
1818 return nvme_identify_nslist(n
, req
);
1819 case NVME_ID_CNS_NS_DESCR_LIST
:
1820 return nvme_identify_ns_descr_list(n
, req
);
1822 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c
->cns
));
1823 return NVME_INVALID_FIELD
| NVME_DNR
;
1827 static uint16_t nvme_abort(NvmeCtrl
*n
, NvmeRequest
*req
)
1829 uint16_t sqid
= le32_to_cpu(req
->cmd
.cdw10
) & 0xffff;
1831 req
->cqe
.result
= 1;
1832 if (nvme_check_sqid(n
, sqid
)) {
1833 return NVME_INVALID_FIELD
| NVME_DNR
;
1836 return NVME_SUCCESS
;
1839 static inline void nvme_set_timestamp(NvmeCtrl
*n
, uint64_t ts
)
1841 trace_pci_nvme_setfeat_timestamp(ts
);
1843 n
->host_timestamp
= le64_to_cpu(ts
);
1844 n
->timestamp_set_qemu_clock_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1847 static inline uint64_t nvme_get_timestamp(const NvmeCtrl
*n
)
1849 uint64_t current_time
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1850 uint64_t elapsed_time
= current_time
- n
->timestamp_set_qemu_clock_ms
;
1852 union nvme_timestamp
{
1854 uint64_t timestamp
:48;
1862 union nvme_timestamp ts
;
1864 ts
.timestamp
= n
->host_timestamp
+ elapsed_time
;
1866 /* If the host timestamp is non-zero, set the timestamp origin */
1867 ts
.origin
= n
->host_timestamp
? 0x01 : 0x00;
1869 trace_pci_nvme_getfeat_timestamp(ts
.all
);
1871 return cpu_to_le64(ts
.all
);
1874 static uint16_t nvme_get_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
1876 uint64_t timestamp
= nvme_get_timestamp(n
);
1878 return nvme_dma(n
, (uint8_t *)×tamp
, sizeof(timestamp
),
1879 DMA_DIRECTION_FROM_DEVICE
, req
);
1882 static uint16_t nvme_get_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
1884 NvmeCmd
*cmd
= &req
->cmd
;
1885 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1886 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1887 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
1889 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
1890 NvmeGetFeatureSelect sel
= NVME_GETFEAT_SELECT(dw10
);
1894 static const uint32_t nvme_feature_default
[NVME_FID_MAX
] = {
1895 [NVME_ARBITRATION
] = NVME_ARB_AB_NOLIMIT
,
1898 trace_pci_nvme_getfeat(nvme_cid(req
), nsid
, fid
, sel
, dw11
);
1900 if (!nvme_feature_support
[fid
]) {
1901 return NVME_INVALID_FIELD
| NVME_DNR
;
1904 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
1905 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1907 * The Reservation Notification Mask and Reservation Persistence
1908 * features require a status code of Invalid Field in Command when
1909 * NSID is 0xFFFFFFFF. Since the device does not support those
1910 * features we can always return Invalid Namespace or Format as we
1911 * should do for all other features.
1913 return NVME_INVALID_NSID
| NVME_DNR
;
1916 if (!nvme_ns(n
, nsid
)) {
1917 return NVME_INVALID_FIELD
| NVME_DNR
;
1922 case NVME_GETFEAT_SELECT_CURRENT
:
1924 case NVME_GETFEAT_SELECT_SAVED
:
1925 /* no features are saveable by the controller; fallthrough */
1926 case NVME_GETFEAT_SELECT_DEFAULT
:
1928 case NVME_GETFEAT_SELECT_CAP
:
1929 result
= nvme_feature_cap
[fid
];
1934 case NVME_TEMPERATURE_THRESHOLD
:
1938 * The controller only implements the Composite Temperature sensor, so
1939 * return 0 for all other sensors.
1941 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1945 switch (NVME_TEMP_THSEL(dw11
)) {
1946 case NVME_TEMP_THSEL_OVER
:
1947 result
= n
->features
.temp_thresh_hi
;
1949 case NVME_TEMP_THSEL_UNDER
:
1950 result
= n
->features
.temp_thresh_low
;
1954 return NVME_INVALID_FIELD
| NVME_DNR
;
1955 case NVME_ERROR_RECOVERY
:
1956 if (!nvme_nsid_valid(n
, nsid
)) {
1957 return NVME_INVALID_NSID
| NVME_DNR
;
1960 ns
= nvme_ns(n
, nsid
);
1961 if (unlikely(!ns
)) {
1962 return NVME_INVALID_FIELD
| NVME_DNR
;
1965 result
= ns
->features
.err_rec
;
1967 case NVME_VOLATILE_WRITE_CACHE
:
1968 result
= n
->features
.vwc
;
1969 trace_pci_nvme_getfeat_vwcache(result
? "enabled" : "disabled");
1971 case NVME_ASYNCHRONOUS_EVENT_CONF
:
1972 result
= n
->features
.async_config
;
1974 case NVME_TIMESTAMP
:
1975 return nvme_get_feature_timestamp(n
, req
);
1982 case NVME_TEMPERATURE_THRESHOLD
:
1985 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1989 if (NVME_TEMP_THSEL(dw11
) == NVME_TEMP_THSEL_OVER
) {
1990 result
= NVME_TEMPERATURE_WARNING
;
1994 case NVME_NUMBER_OF_QUEUES
:
1995 result
= (n
->params
.max_ioqpairs
- 1) |
1996 ((n
->params
.max_ioqpairs
- 1) << 16);
1997 trace_pci_nvme_getfeat_numq(result
);
1999 case NVME_INTERRUPT_VECTOR_CONF
:
2001 if (iv
>= n
->params
.max_ioqpairs
+ 1) {
2002 return NVME_INVALID_FIELD
| NVME_DNR
;
2006 if (iv
== n
->admin_cq
.vector
) {
2007 result
|= NVME_INTVC_NOCOALESCING
;
2012 result
= nvme_feature_default
[fid
];
2017 req
->cqe
.result
= cpu_to_le32(result
);
2018 return NVME_SUCCESS
;
2021 static uint16_t nvme_set_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
2026 ret
= nvme_dma(n
, (uint8_t *)×tamp
, sizeof(timestamp
),
2027 DMA_DIRECTION_TO_DEVICE
, req
);
2028 if (ret
!= NVME_SUCCESS
) {
2032 nvme_set_timestamp(n
, timestamp
);
2034 return NVME_SUCCESS
;
2037 static uint16_t nvme_set_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
2039 NvmeNamespace
*ns
= NULL
;
2041 NvmeCmd
*cmd
= &req
->cmd
;
2042 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
2043 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
2044 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
2045 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
2046 uint8_t save
= NVME_SETFEAT_SAVE(dw10
);
2049 trace_pci_nvme_setfeat(nvme_cid(req
), nsid
, fid
, save
, dw11
);
2052 return NVME_FID_NOT_SAVEABLE
| NVME_DNR
;
2055 if (!nvme_feature_support
[fid
]) {
2056 return NVME_INVALID_FIELD
| NVME_DNR
;
2059 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
2060 if (nsid
!= NVME_NSID_BROADCAST
) {
2061 if (!nvme_nsid_valid(n
, nsid
)) {
2062 return NVME_INVALID_NSID
| NVME_DNR
;
2065 ns
= nvme_ns(n
, nsid
);
2066 if (unlikely(!ns
)) {
2067 return NVME_INVALID_FIELD
| NVME_DNR
;
2070 } else if (nsid
&& nsid
!= NVME_NSID_BROADCAST
) {
2071 if (!nvme_nsid_valid(n
, nsid
)) {
2072 return NVME_INVALID_NSID
| NVME_DNR
;
2075 return NVME_FEAT_NOT_NS_SPEC
| NVME_DNR
;
2078 if (!(nvme_feature_cap
[fid
] & NVME_FEAT_CAP_CHANGE
)) {
2079 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
2083 case NVME_TEMPERATURE_THRESHOLD
:
2084 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
2088 switch (NVME_TEMP_THSEL(dw11
)) {
2089 case NVME_TEMP_THSEL_OVER
:
2090 n
->features
.temp_thresh_hi
= NVME_TEMP_TMPTH(dw11
);
2092 case NVME_TEMP_THSEL_UNDER
:
2093 n
->features
.temp_thresh_low
= NVME_TEMP_TMPTH(dw11
);
2096 return NVME_INVALID_FIELD
| NVME_DNR
;
2099 if (((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
2100 (n
->temperature
<= n
->features
.temp_thresh_low
)) &&
2101 NVME_AEC_SMART(n
->features
.async_config
) & NVME_SMART_TEMPERATURE
) {
2102 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
,
2103 NVME_AER_INFO_SMART_TEMP_THRESH
,
2104 NVME_LOG_SMART_INFO
);
2108 case NVME_ERROR_RECOVERY
:
2109 if (nsid
== NVME_NSID_BROADCAST
) {
2110 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2117 if (NVME_ID_NS_NSFEAT_DULBE(ns
->id_ns
.nsfeat
)) {
2118 ns
->features
.err_rec
= dw11
;
2126 ns
->features
.err_rec
= dw11
;
2128 case NVME_VOLATILE_WRITE_CACHE
:
2129 n
->features
.vwc
= dw11
& 0x1;
2131 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2137 if (!(dw11
& 0x1) && blk_enable_write_cache(ns
->blkconf
.blk
)) {
2138 blk_flush(ns
->blkconf
.blk
);
2141 blk_set_enable_write_cache(ns
->blkconf
.blk
, dw11
& 1);
2146 case NVME_NUMBER_OF_QUEUES
:
2147 if (n
->qs_created
) {
2148 return NVME_CMD_SEQ_ERROR
| NVME_DNR
;
2152 * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
2155 if ((dw11
& 0xffff) == 0xffff || ((dw11
>> 16) & 0xffff) == 0xffff) {
2156 return NVME_INVALID_FIELD
| NVME_DNR
;
2159 trace_pci_nvme_setfeat_numq((dw11
& 0xFFFF) + 1,
2160 ((dw11
>> 16) & 0xFFFF) + 1,
2161 n
->params
.max_ioqpairs
,
2162 n
->params
.max_ioqpairs
);
2163 req
->cqe
.result
= cpu_to_le32((n
->params
.max_ioqpairs
- 1) |
2164 ((n
->params
.max_ioqpairs
- 1) << 16));
2166 case NVME_ASYNCHRONOUS_EVENT_CONF
:
2167 n
->features
.async_config
= dw11
;
2169 case NVME_TIMESTAMP
:
2170 return nvme_set_feature_timestamp(n
, req
);
2172 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
2174 return NVME_SUCCESS
;
2177 static uint16_t nvme_aer(NvmeCtrl
*n
, NvmeRequest
*req
)
2179 trace_pci_nvme_aer(nvme_cid(req
));
2181 if (n
->outstanding_aers
> n
->params
.aerl
) {
2182 trace_pci_nvme_aer_aerl_exceeded();
2183 return NVME_AER_LIMIT_EXCEEDED
;
2186 n
->aer_reqs
[n
->outstanding_aers
] = req
;
2187 n
->outstanding_aers
++;
2189 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
2190 nvme_process_aers(n
);
2193 return NVME_NO_COMPLETE
;
2196 static uint16_t nvme_admin_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
2198 trace_pci_nvme_admin_cmd(nvme_cid(req
), nvme_sqid(req
), req
->cmd
.opcode
,
2199 nvme_adm_opc_str(req
->cmd
.opcode
));
2201 switch (req
->cmd
.opcode
) {
2202 case NVME_ADM_CMD_DELETE_SQ
:
2203 return nvme_del_sq(n
, req
);
2204 case NVME_ADM_CMD_CREATE_SQ
:
2205 return nvme_create_sq(n
, req
);
2206 case NVME_ADM_CMD_GET_LOG_PAGE
:
2207 return nvme_get_log(n
, req
);
2208 case NVME_ADM_CMD_DELETE_CQ
:
2209 return nvme_del_cq(n
, req
);
2210 case NVME_ADM_CMD_CREATE_CQ
:
2211 return nvme_create_cq(n
, req
);
2212 case NVME_ADM_CMD_IDENTIFY
:
2213 return nvme_identify(n
, req
);
2214 case NVME_ADM_CMD_ABORT
:
2215 return nvme_abort(n
, req
);
2216 case NVME_ADM_CMD_SET_FEATURES
:
2217 return nvme_set_feature(n
, req
);
2218 case NVME_ADM_CMD_GET_FEATURES
:
2219 return nvme_get_feature(n
, req
);
2220 case NVME_ADM_CMD_ASYNC_EV_REQ
:
2221 return nvme_aer(n
, req
);
2223 trace_pci_nvme_err_invalid_admin_opc(req
->cmd
.opcode
);
2224 return NVME_INVALID_OPCODE
| NVME_DNR
;
2228 static void nvme_process_sq(void *opaque
)
2230 NvmeSQueue
*sq
= opaque
;
2231 NvmeCtrl
*n
= sq
->ctrl
;
2232 NvmeCQueue
*cq
= n
->cq
[sq
->cqid
];
2239 while (!(nvme_sq_empty(sq
) || QTAILQ_EMPTY(&sq
->req_list
))) {
2240 addr
= sq
->dma_addr
+ sq
->head
* n
->sqe_size
;
2241 if (nvme_addr_read(n
, addr
, (void *)&cmd
, sizeof(cmd
))) {
2242 trace_pci_nvme_err_addr_read(addr
);
2243 trace_pci_nvme_err_cfs();
2244 n
->bar
.csts
= NVME_CSTS_FAILED
;
2247 nvme_inc_sq_head(sq
);
2249 req
= QTAILQ_FIRST(&sq
->req_list
);
2250 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
2251 QTAILQ_INSERT_TAIL(&sq
->out_req_list
, req
, entry
);
2252 nvme_req_clear(req
);
2253 req
->cqe
.cid
= cmd
.cid
;
2254 memcpy(&req
->cmd
, &cmd
, sizeof(NvmeCmd
));
2256 status
= sq
->sqid
? nvme_io_cmd(n
, req
) :
2257 nvme_admin_cmd(n
, req
);
2258 if (status
!= NVME_NO_COMPLETE
) {
2259 req
->status
= status
;
2260 nvme_enqueue_req_completion(cq
, req
);
2265 static void nvme_clear_ctrl(NvmeCtrl
*n
)
2270 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2279 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
2280 if (n
->sq
[i
] != NULL
) {
2281 nvme_free_sq(n
->sq
[i
], n
);
2284 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
2285 if (n
->cq
[i
] != NULL
) {
2286 nvme_free_cq(n
->cq
[i
], n
);
2290 while (!QTAILQ_EMPTY(&n
->aer_queue
)) {
2291 NvmeAsyncEvent
*event
= QTAILQ_FIRST(&n
->aer_queue
);
2292 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
2297 n
->outstanding_aers
= 0;
2298 n
->qs_created
= false;
2301 static void nvme_ctrl_reset(NvmeCtrl
*n
)
2307 static void nvme_ctrl_shutdown(NvmeCtrl
*n
)
2314 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2320 nvme_ns_shutdown(ns
);
2324 static int nvme_start_ctrl(NvmeCtrl
*n
)
2326 uint32_t page_bits
= NVME_CC_MPS(n
->bar
.cc
) + 12;
2327 uint32_t page_size
= 1 << page_bits
;
2329 if (unlikely(n
->cq
[0])) {
2330 trace_pci_nvme_err_startfail_cq();
2333 if (unlikely(n
->sq
[0])) {
2334 trace_pci_nvme_err_startfail_sq();
2337 if (unlikely(!n
->bar
.asq
)) {
2338 trace_pci_nvme_err_startfail_nbarasq();
2341 if (unlikely(!n
->bar
.acq
)) {
2342 trace_pci_nvme_err_startfail_nbaracq();
2345 if (unlikely(n
->bar
.asq
& (page_size
- 1))) {
2346 trace_pci_nvme_err_startfail_asq_misaligned(n
->bar
.asq
);
2349 if (unlikely(n
->bar
.acq
& (page_size
- 1))) {
2350 trace_pci_nvme_err_startfail_acq_misaligned(n
->bar
.acq
);
2353 if (unlikely(!(NVME_CAP_CSS(n
->bar
.cap
) & (1 << NVME_CC_CSS(n
->bar
.cc
))))) {
2354 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n
->bar
.cc
));
2357 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) <
2358 NVME_CAP_MPSMIN(n
->bar
.cap
))) {
2359 trace_pci_nvme_err_startfail_page_too_small(
2360 NVME_CC_MPS(n
->bar
.cc
),
2361 NVME_CAP_MPSMIN(n
->bar
.cap
));
2364 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) >
2365 NVME_CAP_MPSMAX(n
->bar
.cap
))) {
2366 trace_pci_nvme_err_startfail_page_too_large(
2367 NVME_CC_MPS(n
->bar
.cc
),
2368 NVME_CAP_MPSMAX(n
->bar
.cap
));
2371 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) <
2372 NVME_CTRL_CQES_MIN(n
->id_ctrl
.cqes
))) {
2373 trace_pci_nvme_err_startfail_cqent_too_small(
2374 NVME_CC_IOCQES(n
->bar
.cc
),
2375 NVME_CTRL_CQES_MIN(n
->bar
.cap
));
2378 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) >
2379 NVME_CTRL_CQES_MAX(n
->id_ctrl
.cqes
))) {
2380 trace_pci_nvme_err_startfail_cqent_too_large(
2381 NVME_CC_IOCQES(n
->bar
.cc
),
2382 NVME_CTRL_CQES_MAX(n
->bar
.cap
));
2385 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) <
2386 NVME_CTRL_SQES_MIN(n
->id_ctrl
.sqes
))) {
2387 trace_pci_nvme_err_startfail_sqent_too_small(
2388 NVME_CC_IOSQES(n
->bar
.cc
),
2389 NVME_CTRL_SQES_MIN(n
->bar
.cap
));
2392 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) >
2393 NVME_CTRL_SQES_MAX(n
->id_ctrl
.sqes
))) {
2394 trace_pci_nvme_err_startfail_sqent_too_large(
2395 NVME_CC_IOSQES(n
->bar
.cc
),
2396 NVME_CTRL_SQES_MAX(n
->bar
.cap
));
2399 if (unlikely(!NVME_AQA_ASQS(n
->bar
.aqa
))) {
2400 trace_pci_nvme_err_startfail_asqent_sz_zero();
2403 if (unlikely(!NVME_AQA_ACQS(n
->bar
.aqa
))) {
2404 trace_pci_nvme_err_startfail_acqent_sz_zero();
2408 n
->page_bits
= page_bits
;
2409 n
->page_size
= page_size
;
2410 n
->max_prp_ents
= n
->page_size
/ sizeof(uint64_t);
2411 n
->cqe_size
= 1 << NVME_CC_IOCQES(n
->bar
.cc
);
2412 n
->sqe_size
= 1 << NVME_CC_IOSQES(n
->bar
.cc
);
2413 nvme_init_cq(&n
->admin_cq
, n
, n
->bar
.acq
, 0, 0,
2414 NVME_AQA_ACQS(n
->bar
.aqa
) + 1, 1);
2415 nvme_init_sq(&n
->admin_sq
, n
, n
->bar
.asq
, 0, 0,
2416 NVME_AQA_ASQS(n
->bar
.aqa
) + 1);
2418 nvme_set_timestamp(n
, 0ULL);
2420 QTAILQ_INIT(&n
->aer_queue
);
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_ctrl_reset(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_ctrl_shutdown(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then a read from PMRSTS should ensure
         * that prior writes made it to persistent media.
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

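/*
 * Doorbell writes land at BAR0 offset 0x1000 onwards with a stride of
 * 4 bytes (CAP.DSTRD = 0): SQ tail doorbells at even dword offsets and CQ
 * head doorbells at odd ones. For example, offset 0x1000 is the admin SQ
 * tail doorbell, 0x1004 the admin CQ head doorbell, 0x1008 the SQ 1 tail
 * doorbell, and so on; hence qid = (addr - 0x1000) >> 3 below.
 */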
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software
             * writes an invalid value to the Submission Queue Tail Doorbell
             * or Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell
             * Register" status code, but nowhere does it specify when to use
             * it. However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

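/*
 * Top-level MMIO write dispatcher: offsets within the register file go to
 * nvme_write_bar(); everything beyond it is treated as a doorbell write.
 */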
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

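/*
 * The Controller Memory Buffer is emulated as a plain host buffer
 * (n->cmbuf); accesses are simple little-endian loads and stores of up to
 * eight bytes.
 */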
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

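/*
 * Validate device properties at realize time. Errors are reported through
 * errp; the deprecated num_queues and drive properties are still accepted
 * but only produce warnings.
 */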
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk) {
        warn_report("drive property is deprecated; "
                    "please use an nvme-ns device instead");
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}

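/*
 * Allocate per-controller state. The BAR0 size covers the register file
 * plus one submission and one completion doorbell per queue pair (the
 * admin pair included), rounded up to a power of two.
 */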
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = NVME_MAX_NAMESPACES;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}

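/*
 * Attach a namespace to the controller. A zero nsid requests automatic
 * allocation of the lowest free namespace id; an explicit nsid must be in
 * range and not already in use.
 */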
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    uint32_t nsid = nvme_nsid(ns);

    if (nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (!nsid) {
        for (int i = 1; i <= n->num_namespaces; i++) {
            if (!nvme_ns(n, i)) {
                nsid = ns->params.nsid = i;
                break;
            }
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return -1;
        }
    } else {
        if (n->namespaces[nsid - 1]) {
            error_setg(errp, "namespace id '%d' is already in use", nsid);
            return -1;
        }
    }

    trace_pci_nvme_register_namespace(nsid);

    n->namespaces[nsid - 1] = ns;

    return 0;
}

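/*
 * Map the Controller Memory Buffer into its own 64-bit prefetchable BAR,
 * sized from the cmb_size_mb property. The CMBSZ flags set below advertise
 * which queue and data types the guest may place in the CMB.
 */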
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}

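/*
 * Expose the Persistent Memory Region backed by the pmrdev memory backend.
 * Most PMR capability fields are left at zero; PMRCAP.PMRWBM bit 1 is set
 * so the host is told that a read of PMRSTS is sufficient to make prior
 * PMR writes persistent (see nvme_mmio_read).
 */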
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */

    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */

    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */

    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */

    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */

    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */

    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}

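/*
 * PCI-level initialization: config space identity, PCIe endpoint
 * capability, BAR0 for the controller registers, an exclusive BAR for
 * MSI-X, and the optional CMB or PMR BAR.
 */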
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}

static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it
     * is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE);
    id->vwc = 0x1;

    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}

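/*
 * Device realize: check properties, create the nvme bus that nvme-ns
 * devices attach to, set up PCI resources and controller state, and, for
 * backwards compatibility, set up an implicit namespace when the legacy
 * drive property is used.
 */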
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(n, ns, errp)) {
            return;
        }
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_ctrl_shutdown(n);

    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    if (s->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &s->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)