/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *     -drive file=<file>,if=none,id=<drive_id>
 *     -device nvme,serial=<serial>,id=<bus_name>, \
 *             cmb_size_mb=<cmb_size_mb[optional]>, \
 *             [pmrdev=<mem_backend_file_id>,] \
 *             max_ioqpairs=<N[optional]>, \
 *             aerl=<N[optional]>, aer_max_queued=<N[optional]>
 *     -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * cmb_size_mb= and pmrdev= options are mutually exclusive due to limitation
 * in available BAR's. cmb_size_mb= will take precedence over pmrdev= when
 * both are provided.
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 *
 *     -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *             size=<size> .... -device nvme,...,pmrdev=<mem_id>
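 *
 * As a purely illustrative sketch (not taken verbatim from this file), the
 * options above could be combined into a single invocation along these
 * lines; the image paths, serial number and sizes below are placeholders:
 *
 *     qemu-system-x86_64 ... \
 *         -drive file=nvme.img,if=none,id=nvm \
 *         -object memory-backend-file,id=pmr0,share=on,mem-path=pmr.img,size=16M \
 *         -device nvme,serial=deadbeef,id=nvme0,max_ioqpairs=8,pmrdev=pmr0 \
 *         -device nvme-ns,drive=nvm,bus=nvme0,nsid=1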
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
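 *
 *   For instance (illustrative only, not an additional constraint): because
 *   the value is 0's based, aerl=3 permits up to four Asynchronous Event
 *   Request commands to be outstanding at once.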
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events are reached, subsequent events will be dropped.
 *
 */
54 #include "qemu/osdep.h"
55 #include "qemu/units.h"
56 #include "qemu/error-report.h"
57 #include "hw/block/block.h"
58 #include "hw/pci/msix.h"
59 #include "hw/pci/pci.h"
60 #include "hw/qdev-properties.h"
61 #include "migration/vmstate.h"
62 #include "sysemu/sysemu.h"
63 #include "qapi/error.h"
64 #include "qapi/visitor.h"
65 #include "sysemu/hostmem.h"
66 #include "sysemu/block-backend.h"
67 #include "exec/memory.h"
69 #include "qemu/module.h"
70 #include "qemu/cutils.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
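
/*
 * The temperature-related constants above are in Kelvin, matching how the
 * NVMe Composite Temperature field is reported: 0x143 is 323 K (about
 * 50 degrees C), 0x157 is 343 K (about 70 degrees C) and 0x175 is 373 K
 * (about 100 degrees C).
 */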

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};

static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}

static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}

static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}

static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}

static void nvme_req_clear(NvmeRequest *req)
{
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}

static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}

static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}

static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;
    int ret;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                return NVME_DATA_TRAS_ERROR;
            }

            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        return NVME_DATA_TRAS_ERROR;
                    }

                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_PRP_OFFSET | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}

/*
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped in len.
 */
static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
                                  QEMUIOVector *iov,
                                  NvmeSglDescriptor *segment, uint64_t nsgld,
                                  size_t *len, NvmeRequest *req)
{
    dma_addr_t addr, trans_len;
    uint32_t dlen;
    uint16_t status;

    for (int i = 0; i < nsgld; i++) {
        uint8_t type = NVME_SGL_TYPE(segment[i].type);

        switch (type) {
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            if (req->cmd.opcode == NVME_CMD_WRITE) {
                continue;
            }
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
            break;
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
        default:
            return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
        }

        dlen = le32_to_cpu(segment[i].len);

        if (*len == 0) {
            /*
             * All data has been mapped, but the SGL contains additional
             * segments and/or descriptors. The controller might accept
             * ignoring the rest of the SGL.
             */
            uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
            if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
                break;
            }

            trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        trans_len = MIN(*len, dlen);

        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
            goto next;
        }

        addr = le64_to_cpu(segment[i].addr);

        if (UINT64_MAX - addr < dlen) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        status = nvme_map_addr(n, qsg, iov, addr, trans_len);
        if (status) {
            return status;
        }

next:
        *len -= trans_len;
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                             NvmeSglDescriptor sgl, size_t len,
                             NvmeRequest *req)
{
    /*
     * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
     * dynamically allocating a potentially huge SGL. The spec allows the SGL
     * to be larger (as in number of bytes required to describe the SGL
     * descriptors and segment chain) than the command transfer size, so it is
     * not bounded by MDTS.
     */
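    /*
     * Put differently (an explanatory aside, not a statement from the spec):
     * each NvmeSglDescriptor is 16 bytes, so a 256-descriptor chunk is
     * exactly the 4 KiB that fits in one page-sized read below.
     */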
    const int SEG_CHUNK_SIZE = 256;

    NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
    uint64_t nsgld;
    uint32_t seg_len;
    uint16_t status;
    bool sgl_in_cmb = false;
    hwaddr addr;
    int ret;

    sgld = &sgl;
    addr = le64_to_cpu(sgl.addr);

    trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);

    /*
     * If the entire transfer can be described with a single data block it can
     * be mapped directly.
     */
    if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
        status = nvme_map_sgl_data(n, qsg, iov, sgld, 1, &len, req);
        if (status) {
            goto unmap;
        }

        goto out;
    }

    /*
     * If the segment is located in the CMB, the submission queue of the
     * request must also reside there.
     */
    if (nvme_addr_is_cmb(n, addr)) {
        if (!nvme_addr_is_cmb(n, req->sq->dma_addr)) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        sgl_in_cmb = true;
    }

    for (;;) {
        switch (NVME_SGL_TYPE(sgld->type)) {
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            break;
        default:
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        seg_len = le32_to_cpu(sgld->len);

        /* check the length of the (Last) Segment descriptor */
        if ((!seg_len || seg_len & 0xf) &&
            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        if (UINT64_MAX - addr < seg_len) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        nsgld = seg_len / sizeof(NvmeSglDescriptor);

        while (nsgld > SEG_CHUNK_SIZE) {
            if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
                trace_pci_nvme_err_addr_read(addr);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }

            status = nvme_map_sgl_data(n, qsg, iov, segment, SEG_CHUNK_SIZE,
                                       &len, req);
            if (status) {
                goto unmap;
            }

            nsgld -= SEG_CHUNK_SIZE;
            addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
        }

        ret = nvme_addr_read(n, addr, segment, nsgld *
                             sizeof(NvmeSglDescriptor));
        if (ret) {
            trace_pci_nvme_err_addr_read(addr);
            status = NVME_DATA_TRAS_ERROR;
            goto unmap;
        }

        last_sgld = &segment[nsgld - 1];

        /*
         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
         * then we are done.
         */
        switch (NVME_SGL_TYPE(last_sgld->type)) {
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
            if (status) {
                goto unmap;
            }

            goto out;

        default:
            break;
        }

        /*
         * If the last descriptor was not a Data Block or Bit Bucket, then the
         * current segment must not be a Last Segment.
         */
        if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
            status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
            goto unmap;
        }

        sgld = last_sgld;
        addr = le64_to_cpu(sgld->addr);

        /*
         * Do not map the last descriptor; it will be a Segment or Last Segment
         * descriptor and is handled by the next iteration.
         */
        status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld - 1, &len, req);
        if (status) {
            goto unmap;
        }

        /*
         * If the next segment is in the CMB, make sure that the sgl was
         * already located there.
         */
        if (sgl_in_cmb != nvme_addr_is_cmb(n, addr)) {
            status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
            goto unmap;
        }
    }

out:
    /* if there is any residual left in len, the SGL was too short */
    if (len) {
        status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        goto unmap;
    }

    return NVME_SUCCESS;

unmap:
    if (iov->iov) {
        qemu_iovec_destroy(iov);
    }

    if (qsg->sg) {
        qemu_sglist_destroy(qsg);
    }

    return status;
}

static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    uint64_t prp1, prp2;

    switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
    case NVME_PSDT_PRP:
        prp1 = le64_to_cpu(req->cmd.dptr.prp1);
        prp2 = le64_to_cpu(req->cmd.dptr.prp2);

        return nvme_map_prp(n, prp1, prp2, len, req);
    case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
    case NVME_PSDT_SGL_MPTR_SGL:
        /* SGLs shall not be used for Admin commands in NVMe over PCIe */
        if (!req->sq->sqid) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        return nvme_map_sgl(n, &req->qsg, &req->iov, req->cmd.dptr.sgl, len,
                            req);
    default:
        return NVME_INVALID_FIELD;
    }
}

static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                         DMADirection dir, NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_dptr(n, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}

729 static void nvme_post_cqes(void *opaque
)
731 NvmeCQueue
*cq
= opaque
;
732 NvmeCtrl
*n
= cq
->ctrl
;
733 NvmeRequest
*req
, *next
;
736 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
740 if (nvme_cq_full(cq
)) {
745 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
746 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
747 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
748 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
749 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
752 trace_pci_nvme_err_addr_write(addr
);
753 trace_pci_nvme_err_cfs();
754 n
->bar
.csts
= NVME_CSTS_FAILED
;
757 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
758 nvme_inc_cq_tail(cq
);
760 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
762 if (cq
->tail
!= cq
->head
) {
763 nvme_irq_assert(n
, cq
);
767 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
769 assert(cq
->cqid
== req
->sq
->cqid
);
770 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
774 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
775 req
->status
, req
->cmd
.opcode
);
778 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
779 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
780 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
783 static void nvme_process_aers(void *opaque
)
785 NvmeCtrl
*n
= opaque
;
786 NvmeAsyncEvent
*event
, *next
;
788 trace_pci_nvme_process_aers(n
->aer_queued
);
790 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
792 NvmeAerResult
*result
;
794 /* can't post cqe if there is nothing to complete */
795 if (!n
->outstanding_aers
) {
796 trace_pci_nvme_no_outstanding_aers();
800 /* ignore if masked (cqe posted, but event not cleared) */
801 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
802 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
806 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
809 n
->aer_mask
|= 1 << event
->result
.event_type
;
810 n
->outstanding_aers
--;
812 req
= n
->aer_reqs
[n
->outstanding_aers
];
814 result
= (NvmeAerResult
*) &req
->cqe
.result
;
815 result
->event_type
= event
->result
.event_type
;
816 result
->event_info
= event
->result
.event_info
;
817 result
->log_page
= event
->result
.log_page
;
820 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
823 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
827 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
828 uint8_t event_info
, uint8_t log_page
)
830 NvmeAsyncEvent
*event
;
832 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
834 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
835 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
839 event
= g_new(NvmeAsyncEvent
, 1);
840 event
->result
= (NvmeAerResult
) {
841 .event_type
= event_type
,
842 .event_info
= event_info
,
843 .log_page
= log_page
,
846 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
849 nvme_process_aers(n
);
852 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
854 n
->aer_mask
&= ~(1 << event_type
);
855 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
856 nvme_process_aers(n
);
860 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
862 uint8_t mdts
= n
->params
.mdts
;
864 if (mdts
&& len
> n
->page_size
<< mdts
) {
865 return NVME_INVALID_FIELD
| NVME_DNR
;
871 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
874 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
876 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
877 return NVME_LBA_RANGE
| NVME_DNR
;
static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
                                 uint32_t nlb)
{
    BlockDriverState *bs = blk_bs(ns->blkconf.blk);

    int64_t pnum = 0, bytes = nvme_l2b(ns, nlb);
    int64_t offset = nvme_l2b(ns, slba);
    bool zeroed;
    int ret;

    Error *local_err = NULL;

    /*
     * `pnum` holds the number of bytes after offset that shares the same
     * allocation status as the byte at offset. If `pnum` is different from
     * `bytes`, we should check the allocation status of the next range and
     * continue this until all bytes have been checked.
     */
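    /*
     * A small worked example of the loop below (illustrative only): with
     * offset = 0 and bytes = 8192, if only the first 4096 bytes are
     * allocated, the first bdrv_block_status() call reports pnum = 4096, so
     * offset advances by 4096 and the remaining 4096 bytes are queried on
     * the next iteration.
     */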
    do {
        bytes -= pnum;

        ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
        if (ret < 0) {
            error_setg_errno(&local_err, -ret, "unable to get block status");
            error_report_err(local_err);

            return NVME_INTERNAL_DEV_ERROR;
        }

        zeroed = !!(ret & BDRV_BLOCK_ZERO);

        trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed);

        if (zeroed) {
            return NVME_DULB;
        }

        offset += pnum;
    } while (pnum != bytes);

    return NVME_SUCCESS;
}

926 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
928 uint16_t status
= NVME_SUCCESS
;
929 Error
*local_err
= NULL
;
931 switch (req
->cmd
.opcode
) {
933 status
= NVME_UNRECOVERED_READ
;
937 case NVME_CMD_WRITE_ZEROES
:
938 status
= NVME_WRITE_FAULT
;
941 status
= NVME_INTERNAL_DEV_ERROR
;
945 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(ret
), status
);
947 error_setg_errno(&local_err
, -ret
, "aio failed");
948 error_report_err(local_err
);
951 * Set the command status code to the first encountered error but allow a
952 * subsequent Internal Device Error to trump it.
954 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
958 req
->status
= status
;
961 static void nvme_rw_cb(void *opaque
, int ret
)
963 NvmeRequest
*req
= opaque
;
964 NvmeNamespace
*ns
= req
->ns
;
966 BlockBackend
*blk
= ns
->blkconf
.blk
;
967 BlockAcctCookie
*acct
= &req
->acct
;
968 BlockAcctStats
*stats
= blk_get_stats(blk
);
970 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
973 block_acct_done(stats
, acct
);
975 block_acct_failed(stats
, acct
);
976 nvme_aio_err(req
, ret
);
979 nvme_enqueue_req_completion(nvme_cq(req
), req
);
982 static void nvme_aio_discard_cb(void *opaque
, int ret
)
984 NvmeRequest
*req
= opaque
;
985 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
987 trace_pci_nvme_aio_discard_cb(nvme_cid(req
));
990 nvme_aio_err(req
, ret
);
999 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1002 struct nvme_compare_ctx
{
1008 static void nvme_compare_cb(void *opaque
, int ret
)
1010 NvmeRequest
*req
= opaque
;
1011 NvmeNamespace
*ns
= req
->ns
;
1012 struct nvme_compare_ctx
*ctx
= req
->opaque
;
1013 g_autofree
uint8_t *buf
= NULL
;
1016 trace_pci_nvme_compare_cb(nvme_cid(req
));
1019 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1021 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1022 nvme_aio_err(req
, ret
);
1026 buf
= g_malloc(ctx
->len
);
1028 status
= nvme_dma(nvme_ctrl(req
), buf
, ctx
->len
, DMA_DIRECTION_TO_DEVICE
,
1031 req
->status
= status
;
1035 if (memcmp(buf
, ctx
->bounce
, ctx
->len
)) {
1036 req
->status
= NVME_CMP_FAILURE
;
1040 qemu_iovec_destroy(&ctx
->iov
);
1041 g_free(ctx
->bounce
);
1044 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1047 static uint16_t nvme_dsm(NvmeCtrl
*n
, NvmeRequest
*req
)
1049 NvmeNamespace
*ns
= req
->ns
;
1050 NvmeDsmCmd
*dsm
= (NvmeDsmCmd
*) &req
->cmd
;
1052 uint32_t attr
= le32_to_cpu(dsm
->attributes
);
1053 uint32_t nr
= (le32_to_cpu(dsm
->nr
) & 0xff) + 1;
1055 uint16_t status
= NVME_SUCCESS
;
1057 trace_pci_nvme_dsm(nvme_cid(req
), nvme_nsid(ns
), nr
, attr
);
1059 if (attr
& NVME_DSMGMT_AD
) {
1062 NvmeDsmRange range
[nr
];
1063 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1065 status
= nvme_dma(n
, (uint8_t *)range
, sizeof(range
),
1066 DMA_DIRECTION_TO_DEVICE
, req
);
         * AIO callbacks may be called immediately, so initialize discards to 1
         * to make sure the callback does not complete the request before all
         * discards have been issued.
1078 for (int i
= 0; i
< nr
; i
++) {
1079 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1080 uint32_t nlb
= le32_to_cpu(range
[i
].nlb
);
1082 if (nvme_check_bounds(ns
, slba
, nlb
)) {
1083 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
1088 trace_pci_nvme_dsm_deallocate(nvme_cid(req
), nvme_nsid(ns
), slba
,
1091 offset
= nvme_l2b(ns
, slba
);
1092 len
= nvme_l2b(ns
, nlb
);
1095 size_t bytes
= MIN(BDRV_REQUEST_MAX_BYTES
, len
);
1099 blk_aio_pdiscard(ns
->blkconf
.blk
, offset
, bytes
,
1100 nvme_aio_discard_cb
, req
);
1107 /* account for the 1-initialization */
1111 status
= NVME_NO_COMPLETE
;
1113 status
= req
->status
;
1120 static uint16_t nvme_compare(NvmeCtrl
*n
, NvmeRequest
*req
)
1122 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1123 NvmeNamespace
*ns
= req
->ns
;
1124 BlockBackend
*blk
= ns
->blkconf
.blk
;
1125 uint64_t slba
= le64_to_cpu(rw
->slba
);
1126 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
1127 size_t len
= nvme_l2b(ns
, nlb
);
1128 int64_t offset
= nvme_l2b(ns
, slba
);
1129 uint8_t *bounce
= NULL
;
1130 struct nvme_compare_ctx
*ctx
= NULL
;
1133 trace_pci_nvme_compare(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1135 status
= nvme_check_mdts(n
, len
);
1137 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1141 status
= nvme_check_bounds(ns
, slba
, nlb
);
1143 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1147 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1148 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1154 bounce
= g_malloc(len
);
1156 ctx
= g_new(struct nvme_compare_ctx
, 1);
1157 ctx
->bounce
= bounce
;
1162 qemu_iovec_init(&ctx
->iov
, 1);
1163 qemu_iovec_add(&ctx
->iov
, bounce
, len
);
1165 block_acct_start(blk_get_stats(blk
), &req
->acct
, len
, BLOCK_ACCT_READ
);
1166 blk_aio_preadv(blk
, offset
, &ctx
->iov
, 0, nvme_compare_cb
, req
);
1168 return NVME_NO_COMPLETE
;
1171 static uint16_t nvme_flush(NvmeCtrl
*n
, NvmeRequest
*req
)
1173 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
1175 req
->aiocb
= blk_aio_flush(req
->ns
->blkconf
.blk
, nvme_rw_cb
, req
);
1176 return NVME_NO_COMPLETE
;
1179 static uint16_t nvme_write_zeroes(NvmeCtrl
*n
, NvmeRequest
*req
)
1181 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1182 NvmeNamespace
*ns
= req
->ns
;
1183 uint64_t slba
= le64_to_cpu(rw
->slba
);
1184 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1185 uint64_t offset
= nvme_l2b(ns
, slba
);
1186 uint32_t count
= nvme_l2b(ns
, nlb
);
1189 trace_pci_nvme_write_zeroes(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1191 status
= nvme_check_bounds(ns
, slba
, nlb
);
1193 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1197 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
1199 req
->aiocb
= blk_aio_pwrite_zeroes(req
->ns
->blkconf
.blk
, offset
, count
,
1200 BDRV_REQ_MAY_UNMAP
, nvme_rw_cb
, req
);
1201 return NVME_NO_COMPLETE
;
1204 static uint16_t nvme_rw(NvmeCtrl
*n
, NvmeRequest
*req
)
1206 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1207 NvmeNamespace
*ns
= req
->ns
;
1208 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1209 uint64_t slba
= le64_to_cpu(rw
->slba
);
1211 uint64_t data_size
= nvme_l2b(ns
, nlb
);
1212 uint64_t data_offset
= nvme_l2b(ns
, slba
);
1213 enum BlockAcctType acct
= req
->cmd
.opcode
== NVME_CMD_WRITE
?
1214 BLOCK_ACCT_WRITE
: BLOCK_ACCT_READ
;
1215 BlockBackend
*blk
= ns
->blkconf
.blk
;
1218 trace_pci_nvme_rw(nvme_cid(req
), nvme_io_opc_str(rw
->opcode
),
1219 nvme_nsid(ns
), nlb
, data_size
, slba
);
1221 status
= nvme_check_mdts(n
, data_size
);
1223 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
1227 status
= nvme_check_bounds(ns
, slba
, nlb
);
1229 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1233 if (acct
== BLOCK_ACCT_READ
) {
1234 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1235 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1242 status
= nvme_map_dptr(n
, data_size
, req
);
1247 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
, acct
);
1249 if (acct
== BLOCK_ACCT_WRITE
) {
1250 req
->aiocb
= dma_blk_write(blk
, &req
->qsg
, data_offset
,
1251 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1253 req
->aiocb
= dma_blk_read(blk
, &req
->qsg
, data_offset
,
1254 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1257 if (acct
== BLOCK_ACCT_WRITE
) {
1258 req
->aiocb
= blk_aio_pwritev(blk
, data_offset
, &req
->iov
, 0,
1261 req
->aiocb
= blk_aio_preadv(blk
, data_offset
, &req
->iov
, 0,
1265 return NVME_NO_COMPLETE
;
1268 block_acct_invalid(blk_get_stats(ns
->blkconf
.blk
), acct
);
1272 static uint16_t nvme_io_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
1274 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
1276 trace_pci_nvme_io_cmd(nvme_cid(req
), nsid
, nvme_sqid(req
),
1277 req
->cmd
.opcode
, nvme_io_opc_str(req
->cmd
.opcode
));
1279 if (NVME_CC_CSS(n
->bar
.cc
) == NVME_CC_CSS_ADMIN_ONLY
) {
1280 return NVME_INVALID_OPCODE
| NVME_DNR
;
1283 if (!nvme_nsid_valid(n
, nsid
)) {
1284 return NVME_INVALID_NSID
| NVME_DNR
;
1287 req
->ns
= nvme_ns(n
, nsid
);
1288 if (unlikely(!req
->ns
)) {
1289 return NVME_INVALID_FIELD
| NVME_DNR
;
1292 switch (req
->cmd
.opcode
) {
1293 case NVME_CMD_FLUSH
:
1294 return nvme_flush(n
, req
);
1295 case NVME_CMD_WRITE_ZEROES
:
1296 return nvme_write_zeroes(n
, req
);
1297 case NVME_CMD_WRITE
:
1299 return nvme_rw(n
, req
);
1300 case NVME_CMD_COMPARE
:
1301 return nvme_compare(n
, req
);
1303 return nvme_dsm(n
, req
);
1305 trace_pci_nvme_err_invalid_opc(req
->cmd
.opcode
);
1306 return NVME_INVALID_OPCODE
| NVME_DNR
;
1310 static void nvme_free_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
)
1312 n
->sq
[sq
->sqid
] = NULL
;
1313 timer_free(sq
->timer
);
1320 static uint16_t nvme_del_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
1322 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1323 NvmeRequest
*r
, *next
;
1326 uint16_t qid
= le16_to_cpu(c
->qid
);
1328 if (unlikely(!qid
|| nvme_check_sqid(n
, qid
))) {
1329 trace_pci_nvme_err_invalid_del_sq(qid
);
1330 return NVME_INVALID_QID
| NVME_DNR
;
1333 trace_pci_nvme_del_sq(qid
);
1336 while (!QTAILQ_EMPTY(&sq
->out_req_list
)) {
1337 r
= QTAILQ_FIRST(&sq
->out_req_list
);
1339 blk_aio_cancel(r
->aiocb
);
1341 if (!nvme_check_cqid(n
, sq
->cqid
)) {
1342 cq
= n
->cq
[sq
->cqid
];
1343 QTAILQ_REMOVE(&cq
->sq_list
, sq
, entry
);
1346 QTAILQ_FOREACH_SAFE(r
, &cq
->req_list
, entry
, next
) {
1348 QTAILQ_REMOVE(&cq
->req_list
, r
, entry
);
1349 QTAILQ_INSERT_TAIL(&sq
->req_list
, r
, entry
);
1354 nvme_free_sq(sq
, n
);
1355 return NVME_SUCCESS
;
1358 static void nvme_init_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
, uint64_t dma_addr
,
1359 uint16_t sqid
, uint16_t cqid
, uint16_t size
)
1365 sq
->dma_addr
= dma_addr
;
1369 sq
->head
= sq
->tail
= 0;
1370 sq
->io_req
= g_new0(NvmeRequest
, sq
->size
);
1372 QTAILQ_INIT(&sq
->req_list
);
1373 QTAILQ_INIT(&sq
->out_req_list
);
1374 for (i
= 0; i
< sq
->size
; i
++) {
1375 sq
->io_req
[i
].sq
= sq
;
1376 QTAILQ_INSERT_TAIL(&(sq
->req_list
), &sq
->io_req
[i
], entry
);
1378 sq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_process_sq
, sq
);
1380 assert(n
->cq
[cqid
]);
1382 QTAILQ_INSERT_TAIL(&(cq
->sq_list
), sq
, entry
);
1386 static uint16_t nvme_create_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
1389 NvmeCreateSq
*c
= (NvmeCreateSq
*)&req
->cmd
;
1391 uint16_t cqid
= le16_to_cpu(c
->cqid
);
1392 uint16_t sqid
= le16_to_cpu(c
->sqid
);
1393 uint16_t qsize
= le16_to_cpu(c
->qsize
);
1394 uint16_t qflags
= le16_to_cpu(c
->sq_flags
);
1395 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1397 trace_pci_nvme_create_sq(prp1
, sqid
, cqid
, qsize
, qflags
);
1399 if (unlikely(!cqid
|| nvme_check_cqid(n
, cqid
))) {
1400 trace_pci_nvme_err_invalid_create_sq_cqid(cqid
);
1401 return NVME_INVALID_CQID
| NVME_DNR
;
1403 if (unlikely(!sqid
|| sqid
> n
->params
.max_ioqpairs
||
1404 n
->sq
[sqid
] != NULL
)) {
1405 trace_pci_nvme_err_invalid_create_sq_sqid(sqid
);
1406 return NVME_INVALID_QID
| NVME_DNR
;
1408 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
1409 trace_pci_nvme_err_invalid_create_sq_size(qsize
);
1410 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
1412 if (unlikely(prp1
& (n
->page_size
- 1))) {
1413 trace_pci_nvme_err_invalid_create_sq_addr(prp1
);
1414 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
1416 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags
)))) {
1417 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags
));
1418 return NVME_INVALID_FIELD
| NVME_DNR
;
1420 sq
= g_malloc0(sizeof(*sq
));
1421 nvme_init_sq(sq
, n
, prp1
, sqid
, cqid
, qsize
+ 1);
1422 return NVME_SUCCESS
;
1426 uint64_t units_read
;
1427 uint64_t units_written
;
1428 uint64_t read_commands
;
1429 uint64_t write_commands
;
1432 static void nvme_set_blk_stats(NvmeNamespace
*ns
, struct nvme_stats
*stats
)
1434 BlockAcctStats
*s
= blk_get_stats(ns
->blkconf
.blk
);
1436 stats
->units_read
+= s
->nr_bytes
[BLOCK_ACCT_READ
] >> BDRV_SECTOR_BITS
;
1437 stats
->units_written
+= s
->nr_bytes
[BLOCK_ACCT_WRITE
] >> BDRV_SECTOR_BITS
;
1438 stats
->read_commands
+= s
->nr_ops
[BLOCK_ACCT_READ
];
1439 stats
->write_commands
+= s
->nr_ops
[BLOCK_ACCT_WRITE
];
1442 static uint16_t nvme_smart_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
1443 uint64_t off
, NvmeRequest
*req
)
1445 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
1446 struct nvme_stats stats
= { 0 };
1447 NvmeSmartLog smart
= { 0 };
1452 if (off
>= sizeof(smart
)) {
1453 return NVME_INVALID_FIELD
| NVME_DNR
;
1456 if (nsid
!= 0xffffffff) {
1457 ns
= nvme_ns(n
, nsid
);
1459 return NVME_INVALID_NSID
| NVME_DNR
;
1461 nvme_set_blk_stats(ns
, &stats
);
1465 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
1470 nvme_set_blk_stats(ns
, &stats
);
1474 trans_len
= MIN(sizeof(smart
) - off
, buf_len
);
1476 smart
.data_units_read
[0] = cpu_to_le64(DIV_ROUND_UP(stats
.units_read
,
1478 smart
.data_units_written
[0] = cpu_to_le64(DIV_ROUND_UP(stats
.units_written
,
1480 smart
.host_read_commands
[0] = cpu_to_le64(stats
.read_commands
);
1481 smart
.host_write_commands
[0] = cpu_to_le64(stats
.write_commands
);
1483 smart
.temperature
= cpu_to_le16(n
->temperature
);
1485 if ((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
1486 (n
->temperature
<= n
->features
.temp_thresh_low
)) {
1487 smart
.critical_warning
|= NVME_SMART_TEMPERATURE
;
1490 current_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1491 smart
.power_on_hours
[0] =
1492 cpu_to_le64((((current_ms
- n
->starttime_ms
) / 1000) / 60) / 60);
1495 nvme_clear_events(n
, NVME_AER_TYPE_SMART
);
1498 return nvme_dma(n
, (uint8_t *) &smart
+ off
, trans_len
,
1499 DMA_DIRECTION_FROM_DEVICE
, req
);
1502 static uint16_t nvme_fw_log_info(NvmeCtrl
*n
, uint32_t buf_len
, uint64_t off
,
1506 NvmeFwSlotInfoLog fw_log
= {
1510 if (off
>= sizeof(fw_log
)) {
1511 return NVME_INVALID_FIELD
| NVME_DNR
;
1514 strpadcpy((char *)&fw_log
.frs1
, sizeof(fw_log
.frs1
), "1.0", ' ');
1515 trans_len
= MIN(sizeof(fw_log
) - off
, buf_len
);
1517 return nvme_dma(n
, (uint8_t *) &fw_log
+ off
, trans_len
,
1518 DMA_DIRECTION_FROM_DEVICE
, req
);
1521 static uint16_t nvme_error_info(NvmeCtrl
*n
, uint8_t rae
, uint32_t buf_len
,
1522 uint64_t off
, NvmeRequest
*req
)
1525 NvmeErrorLog errlog
;
1527 if (off
>= sizeof(errlog
)) {
1528 return NVME_INVALID_FIELD
| NVME_DNR
;
1532 nvme_clear_events(n
, NVME_AER_TYPE_ERROR
);
1535 memset(&errlog
, 0x0, sizeof(errlog
));
1536 trans_len
= MIN(sizeof(errlog
) - off
, buf_len
);
1538 return nvme_dma(n
, (uint8_t *)&errlog
, trans_len
,
1539 DMA_DIRECTION_FROM_DEVICE
, req
);
1542 static uint16_t nvme_get_log(NvmeCtrl
*n
, NvmeRequest
*req
)
1544 NvmeCmd
*cmd
= &req
->cmd
;
1546 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1547 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1548 uint32_t dw12
= le32_to_cpu(cmd
->cdw12
);
1549 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
1550 uint8_t lid
= dw10
& 0xff;
1551 uint8_t lsp
= (dw10
>> 8) & 0xf;
1552 uint8_t rae
= (dw10
>> 15) & 0x1;
1553 uint32_t numdl
, numdu
;
1554 uint64_t off
, lpol
, lpou
;
1558 numdl
= (dw10
>> 16);
1559 numdu
= (dw11
& 0xffff);
1563 len
= (((numdu
<< 16) | numdl
) + 1) << 2;
1564 off
= (lpou
<< 32ULL) | lpol
;
1567 return NVME_INVALID_FIELD
| NVME_DNR
;
1570 trace_pci_nvme_get_log(nvme_cid(req
), lid
, lsp
, rae
, len
, off
);
1572 status
= nvme_check_mdts(n
, len
);
1574 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1579 case NVME_LOG_ERROR_INFO
:
1580 return nvme_error_info(n
, rae
, len
, off
, req
);
1581 case NVME_LOG_SMART_INFO
:
1582 return nvme_smart_info(n
, rae
, len
, off
, req
);
1583 case NVME_LOG_FW_SLOT_INFO
:
1584 return nvme_fw_log_info(n
, len
, off
, req
);
1586 trace_pci_nvme_err_invalid_log_page(nvme_cid(req
), lid
);
1587 return NVME_INVALID_FIELD
| NVME_DNR
;
1591 static void nvme_free_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
)
1593 n
->cq
[cq
->cqid
] = NULL
;
1594 timer_free(cq
->timer
);
1595 msix_vector_unuse(&n
->parent_obj
, cq
->vector
);
1601 static uint16_t nvme_del_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1603 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
1605 uint16_t qid
= le16_to_cpu(c
->qid
);
1607 if (unlikely(!qid
|| nvme_check_cqid(n
, qid
))) {
1608 trace_pci_nvme_err_invalid_del_cq_cqid(qid
);
1609 return NVME_INVALID_CQID
| NVME_DNR
;
1613 if (unlikely(!QTAILQ_EMPTY(&cq
->sq_list
))) {
1614 trace_pci_nvme_err_invalid_del_cq_notempty(qid
);
1615 return NVME_INVALID_QUEUE_DEL
;
1617 nvme_irq_deassert(n
, cq
);
1618 trace_pci_nvme_del_cq(qid
);
1619 nvme_free_cq(cq
, n
);
1620 return NVME_SUCCESS
;
1623 static void nvme_init_cq(NvmeCQueue
*cq
, NvmeCtrl
*n
, uint64_t dma_addr
,
1624 uint16_t cqid
, uint16_t vector
, uint16_t size
,
1625 uint16_t irq_enabled
)
1629 ret
= msix_vector_use(&n
->parent_obj
, vector
);
1634 cq
->dma_addr
= dma_addr
;
1636 cq
->irq_enabled
= irq_enabled
;
1637 cq
->vector
= vector
;
1638 cq
->head
= cq
->tail
= 0;
1639 QTAILQ_INIT(&cq
->req_list
);
1640 QTAILQ_INIT(&cq
->sq_list
);
1642 cq
->timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
, nvme_post_cqes
, cq
);
1645 static uint16_t nvme_create_cq(NvmeCtrl
*n
, NvmeRequest
*req
)
1648 NvmeCreateCq
*c
= (NvmeCreateCq
*)&req
->cmd
;
1649 uint16_t cqid
= le16_to_cpu(c
->cqid
);
1650 uint16_t vector
= le16_to_cpu(c
->irq_vector
);
1651 uint16_t qsize
= le16_to_cpu(c
->qsize
);
1652 uint16_t qflags
= le16_to_cpu(c
->cq_flags
);
1653 uint64_t prp1
= le64_to_cpu(c
->prp1
);
1655 trace_pci_nvme_create_cq(prp1
, cqid
, vector
, qsize
, qflags
,
1656 NVME_CQ_FLAGS_IEN(qflags
) != 0);
1658 if (unlikely(!cqid
|| cqid
> n
->params
.max_ioqpairs
||
1659 n
->cq
[cqid
] != NULL
)) {
1660 trace_pci_nvme_err_invalid_create_cq_cqid(cqid
);
1661 return NVME_INVALID_QID
| NVME_DNR
;
1663 if (unlikely(!qsize
|| qsize
> NVME_CAP_MQES(n
->bar
.cap
))) {
1664 trace_pci_nvme_err_invalid_create_cq_size(qsize
);
1665 return NVME_MAX_QSIZE_EXCEEDED
| NVME_DNR
;
1667 if (unlikely(prp1
& (n
->page_size
- 1))) {
1668 trace_pci_nvme_err_invalid_create_cq_addr(prp1
);
1669 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
1671 if (unlikely(!msix_enabled(&n
->parent_obj
) && vector
)) {
1672 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1673 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1675 if (unlikely(vector
>= n
->params
.msix_qsize
)) {
1676 trace_pci_nvme_err_invalid_create_cq_vector(vector
);
1677 return NVME_INVALID_IRQ_VECTOR
| NVME_DNR
;
1679 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags
)))) {
1680 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags
));
1681 return NVME_INVALID_FIELD
| NVME_DNR
;
1684 cq
= g_malloc0(sizeof(*cq
));
1685 nvme_init_cq(cq
, n
, prp1
, cqid
, vector
, qsize
+ 1,
1686 NVME_CQ_FLAGS_IEN(qflags
));
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
1693 n
->qs_created
= true;
1694 return NVME_SUCCESS
;
1697 static uint16_t nvme_identify_ctrl(NvmeCtrl
*n
, NvmeRequest
*req
)
1699 trace_pci_nvme_identify_ctrl();
1701 return nvme_dma(n
, (uint8_t *)&n
->id_ctrl
, sizeof(n
->id_ctrl
),
1702 DMA_DIRECTION_FROM_DEVICE
, req
);
1705 static uint16_t nvme_identify_ns(NvmeCtrl
*n
, NvmeRequest
*req
)
1708 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1709 NvmeIdNs
*id_ns
, inactive
= { 0 };
1710 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1712 trace_pci_nvme_identify_ns(nsid
);
1714 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1715 return NVME_INVALID_NSID
| NVME_DNR
;
1718 ns
= nvme_ns(n
, nsid
);
1719 if (unlikely(!ns
)) {
1725 return nvme_dma(n
, (uint8_t *)id_ns
, sizeof(NvmeIdNs
),
1726 DMA_DIRECTION_FROM_DEVICE
, req
);
1729 static uint16_t nvme_identify_nslist(NvmeCtrl
*n
, NvmeRequest
*req
)
1731 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1732 static const int data_len
= NVME_IDENTIFY_DATA_SIZE
;
1733 uint32_t min_nsid
= le32_to_cpu(c
->nsid
);
1738 trace_pci_nvme_identify_nslist(min_nsid
);
1741 * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
1742 * since the Active Namespace ID List should return namespaces with ids
1743 * *higher* than the NSID specified in the command. This is also specified
1744 * in the spec (NVM Express v1.3d, Section 5.15.4).
1746 if (min_nsid
>= NVME_NSID_BROADCAST
- 1) {
1747 return NVME_INVALID_NSID
| NVME_DNR
;
1750 list
= g_malloc0(data_len
);
1751 for (int i
= 1; i
<= n
->num_namespaces
; i
++) {
1752 if (i
<= min_nsid
|| !nvme_ns(n
, i
)) {
1755 list
[j
++] = cpu_to_le32(i
);
1756 if (j
== data_len
/ sizeof(uint32_t)) {
1760 ret
= nvme_dma(n
, (uint8_t *)list
, data_len
, DMA_DIRECTION_FROM_DEVICE
,
1766 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl
*n
, NvmeRequest
*req
)
1768 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1769 uint32_t nsid
= le32_to_cpu(c
->nsid
);
1770 uint8_t list
[NVME_IDENTIFY_DATA_SIZE
];
1779 struct data
*ns_descrs
= (struct data
*)list
;
1781 trace_pci_nvme_identify_ns_descr_list(nsid
);
1783 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1784 return NVME_INVALID_NSID
| NVME_DNR
;
1787 if (unlikely(!nvme_ns(n
, nsid
))) {
1788 return NVME_INVALID_FIELD
| NVME_DNR
;
1791 memset(list
, 0x0, sizeof(list
));
     * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
     * structure, a Namespace UUID (nidt = 0x3) must be reported in the
     * Namespace Identification Descriptor. Add a very basic Namespace UUID
     * here.
1799 ns_descrs
->uuid
.hdr
.nidt
= NVME_NIDT_UUID
;
1800 ns_descrs
->uuid
.hdr
.nidl
= NVME_NIDT_UUID_LEN
;
1801 stl_be_p(&ns_descrs
->uuid
.v
, nsid
);
1803 return nvme_dma(n
, list
, NVME_IDENTIFY_DATA_SIZE
,
1804 DMA_DIRECTION_FROM_DEVICE
, req
);
1807 static uint16_t nvme_identify(NvmeCtrl
*n
, NvmeRequest
*req
)
1809 NvmeIdentify
*c
= (NvmeIdentify
*)&req
->cmd
;
1811 switch (le32_to_cpu(c
->cns
)) {
1812 case NVME_ID_CNS_NS
:
1813 return nvme_identify_ns(n
, req
);
1814 case NVME_ID_CNS_CTRL
:
1815 return nvme_identify_ctrl(n
, req
);
1816 case NVME_ID_CNS_NS_ACTIVE_LIST
:
1817 return nvme_identify_nslist(n
, req
);
1818 case NVME_ID_CNS_NS_DESCR_LIST
:
1819 return nvme_identify_ns_descr_list(n
, req
);
1821 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c
->cns
));
1822 return NVME_INVALID_FIELD
| NVME_DNR
;
1826 static uint16_t nvme_abort(NvmeCtrl
*n
, NvmeRequest
*req
)
1828 uint16_t sqid
= le32_to_cpu(req
->cmd
.cdw10
) & 0xffff;
1830 req
->cqe
.result
= 1;
1831 if (nvme_check_sqid(n
, sqid
)) {
1832 return NVME_INVALID_FIELD
| NVME_DNR
;
1835 return NVME_SUCCESS
;
1838 static inline void nvme_set_timestamp(NvmeCtrl
*n
, uint64_t ts
)
1840 trace_pci_nvme_setfeat_timestamp(ts
);
1842 n
->host_timestamp
= le64_to_cpu(ts
);
1843 n
->timestamp_set_qemu_clock_ms
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1846 static inline uint64_t nvme_get_timestamp(const NvmeCtrl
*n
)
1848 uint64_t current_time
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
1849 uint64_t elapsed_time
= current_time
- n
->timestamp_set_qemu_clock_ms
;
1851 union nvme_timestamp
{
1853 uint64_t timestamp
:48;
1861 union nvme_timestamp ts
;
1863 ts
.timestamp
= n
->host_timestamp
+ elapsed_time
;
1865 /* If the host timestamp is non-zero, set the timestamp origin */
1866 ts
.origin
= n
->host_timestamp
? 0x01 : 0x00;
1868 trace_pci_nvme_getfeat_timestamp(ts
.all
);
1870 return cpu_to_le64(ts
.all
);
1873 static uint16_t nvme_get_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
1875 uint64_t timestamp
= nvme_get_timestamp(n
);
1877 return nvme_dma(n
, (uint8_t *)×tamp
, sizeof(timestamp
),
1878 DMA_DIRECTION_FROM_DEVICE
, req
);
1881 static uint16_t nvme_get_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
1883 NvmeCmd
*cmd
= &req
->cmd
;
1884 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
1885 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
1886 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
1888 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
1889 NvmeGetFeatureSelect sel
= NVME_GETFEAT_SELECT(dw10
);
1893 static const uint32_t nvme_feature_default
[NVME_FID_MAX
] = {
1894 [NVME_ARBITRATION
] = NVME_ARB_AB_NOLIMIT
,
1897 trace_pci_nvme_getfeat(nvme_cid(req
), nsid
, fid
, sel
, dw11
);
1899 if (!nvme_feature_support
[fid
]) {
1900 return NVME_INVALID_FIELD
| NVME_DNR
;
1903 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
1904 if (!nvme_nsid_valid(n
, nsid
) || nsid
== NVME_NSID_BROADCAST
) {
1906 * The Reservation Notification Mask and Reservation Persistence
1907 * features require a status code of Invalid Field in Command when
1908 * NSID is 0xFFFFFFFF. Since the device does not support those
1909 * features we can always return Invalid Namespace or Format as we
1910 * should do for all other features.
1912 return NVME_INVALID_NSID
| NVME_DNR
;
1915 if (!nvme_ns(n
, nsid
)) {
1916 return NVME_INVALID_FIELD
| NVME_DNR
;
1921 case NVME_GETFEAT_SELECT_CURRENT
:
1923 case NVME_GETFEAT_SELECT_SAVED
:
1924 /* no features are saveable by the controller; fallthrough */
1925 case NVME_GETFEAT_SELECT_DEFAULT
:
1927 case NVME_GETFEAT_SELECT_CAP
:
1928 result
= nvme_feature_cap
[fid
];
1933 case NVME_TEMPERATURE_THRESHOLD
:
1937 * The controller only implements the Composite Temperature sensor, so
1938 * return 0 for all other sensors.
1940 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1944 switch (NVME_TEMP_THSEL(dw11
)) {
1945 case NVME_TEMP_THSEL_OVER
:
1946 result
= n
->features
.temp_thresh_hi
;
1948 case NVME_TEMP_THSEL_UNDER
:
1949 result
= n
->features
.temp_thresh_low
;
1953 return NVME_INVALID_FIELD
| NVME_DNR
;
1954 case NVME_ERROR_RECOVERY
:
1955 if (!nvme_nsid_valid(n
, nsid
)) {
1956 return NVME_INVALID_NSID
| NVME_DNR
;
1959 ns
= nvme_ns(n
, nsid
);
1960 if (unlikely(!ns
)) {
1961 return NVME_INVALID_FIELD
| NVME_DNR
;
1964 result
= ns
->features
.err_rec
;
1966 case NVME_VOLATILE_WRITE_CACHE
:
1967 result
= n
->features
.vwc
;
1968 trace_pci_nvme_getfeat_vwcache(result
? "enabled" : "disabled");
1970 case NVME_ASYNCHRONOUS_EVENT_CONF
:
1971 result
= n
->features
.async_config
;
1973 case NVME_TIMESTAMP
:
1974 return nvme_get_feature_timestamp(n
, req
);
1981 case NVME_TEMPERATURE_THRESHOLD
:
1984 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
1988 if (NVME_TEMP_THSEL(dw11
) == NVME_TEMP_THSEL_OVER
) {
1989 result
= NVME_TEMPERATURE_WARNING
;
1993 case NVME_NUMBER_OF_QUEUES
:
1994 result
= (n
->params
.max_ioqpairs
- 1) |
1995 ((n
->params
.max_ioqpairs
- 1) << 16);
1996 trace_pci_nvme_getfeat_numq(result
);
1998 case NVME_INTERRUPT_VECTOR_CONF
:
2000 if (iv
>= n
->params
.max_ioqpairs
+ 1) {
2001 return NVME_INVALID_FIELD
| NVME_DNR
;
2005 if (iv
== n
->admin_cq
.vector
) {
2006 result
|= NVME_INTVC_NOCOALESCING
;
2011 result
= nvme_feature_default
[fid
];
2016 req
->cqe
.result
= cpu_to_le32(result
);
2017 return NVME_SUCCESS
;
2020 static uint16_t nvme_set_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
2025 ret
= nvme_dma(n
, (uint8_t *)×tamp
, sizeof(timestamp
),
2026 DMA_DIRECTION_TO_DEVICE
, req
);
2027 if (ret
!= NVME_SUCCESS
) {
2031 nvme_set_timestamp(n
, timestamp
);
2033 return NVME_SUCCESS
;
2036 static uint16_t nvme_set_feature(NvmeCtrl
*n
, NvmeRequest
*req
)
2038 NvmeNamespace
*ns
= NULL
;
2040 NvmeCmd
*cmd
= &req
->cmd
;
2041 uint32_t dw10
= le32_to_cpu(cmd
->cdw10
);
2042 uint32_t dw11
= le32_to_cpu(cmd
->cdw11
);
2043 uint32_t nsid
= le32_to_cpu(cmd
->nsid
);
2044 uint8_t fid
= NVME_GETSETFEAT_FID(dw10
);
2045 uint8_t save
= NVME_SETFEAT_SAVE(dw10
);
2048 trace_pci_nvme_setfeat(nvme_cid(req
), nsid
, fid
, save
, dw11
);
2051 return NVME_FID_NOT_SAVEABLE
| NVME_DNR
;
2054 if (!nvme_feature_support
[fid
]) {
2055 return NVME_INVALID_FIELD
| NVME_DNR
;
2058 if (nvme_feature_cap
[fid
] & NVME_FEAT_CAP_NS
) {
2059 if (nsid
!= NVME_NSID_BROADCAST
) {
2060 if (!nvme_nsid_valid(n
, nsid
)) {
2061 return NVME_INVALID_NSID
| NVME_DNR
;
2064 ns
= nvme_ns(n
, nsid
);
2065 if (unlikely(!ns
)) {
2066 return NVME_INVALID_FIELD
| NVME_DNR
;
2069 } else if (nsid
&& nsid
!= NVME_NSID_BROADCAST
) {
2070 if (!nvme_nsid_valid(n
, nsid
)) {
2071 return NVME_INVALID_NSID
| NVME_DNR
;
2074 return NVME_FEAT_NOT_NS_SPEC
| NVME_DNR
;
2077 if (!(nvme_feature_cap
[fid
] & NVME_FEAT_CAP_CHANGE
)) {
2078 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
2082 case NVME_TEMPERATURE_THRESHOLD
:
2083 if (NVME_TEMP_TMPSEL(dw11
) != NVME_TEMP_TMPSEL_COMPOSITE
) {
2087 switch (NVME_TEMP_THSEL(dw11
)) {
2088 case NVME_TEMP_THSEL_OVER
:
2089 n
->features
.temp_thresh_hi
= NVME_TEMP_TMPTH(dw11
);
2091 case NVME_TEMP_THSEL_UNDER
:
2092 n
->features
.temp_thresh_low
= NVME_TEMP_TMPTH(dw11
);
2095 return NVME_INVALID_FIELD
| NVME_DNR
;
2098 if (((n
->temperature
>= n
->features
.temp_thresh_hi
) ||
2099 (n
->temperature
<= n
->features
.temp_thresh_low
)) &&
2100 NVME_AEC_SMART(n
->features
.async_config
) & NVME_SMART_TEMPERATURE
) {
2101 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
,
2102 NVME_AER_INFO_SMART_TEMP_THRESH
,
2103 NVME_LOG_SMART_INFO
);
2107 case NVME_ERROR_RECOVERY
:
2108 if (nsid
== NVME_NSID_BROADCAST
) {
2109 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2116 if (NVME_ID_NS_NSFEAT_DULBE(ns
->id_ns
.nsfeat
)) {
2117 ns
->features
.err_rec
= dw11
;
2125 ns
->features
.err_rec
= dw11
;
2127 case NVME_VOLATILE_WRITE_CACHE
:
2128 n
->features
.vwc
= dw11
& 0x1;
2130 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2136 if (!(dw11
& 0x1) && blk_enable_write_cache(ns
->blkconf
.blk
)) {
2137 blk_flush(ns
->blkconf
.blk
);
2140 blk_set_enable_write_cache(ns
->blkconf
.blk
, dw11
& 1);
2145 case NVME_NUMBER_OF_QUEUES
:
2146 if (n
->qs_created
) {
2147 return NVME_CMD_SEQ_ERROR
| NVME_DNR
;
2151 * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
2154 if ((dw11
& 0xffff) == 0xffff || ((dw11
>> 16) & 0xffff) == 0xffff) {
2155 return NVME_INVALID_FIELD
| NVME_DNR
;
2158 trace_pci_nvme_setfeat_numq((dw11
& 0xFFFF) + 1,
2159 ((dw11
>> 16) & 0xFFFF) + 1,
2160 n
->params
.max_ioqpairs
,
2161 n
->params
.max_ioqpairs
);
2162 req
->cqe
.result
= cpu_to_le32((n
->params
.max_ioqpairs
- 1) |
2163 ((n
->params
.max_ioqpairs
- 1) << 16));
2165 case NVME_ASYNCHRONOUS_EVENT_CONF
:
2166 n
->features
.async_config
= dw11
;
2168 case NVME_TIMESTAMP
:
2169 return nvme_set_feature_timestamp(n
, req
);
2171 return NVME_FEAT_NOT_CHANGEABLE
| NVME_DNR
;
2173 return NVME_SUCCESS
;
2176 static uint16_t nvme_aer(NvmeCtrl
*n
, NvmeRequest
*req
)
2178 trace_pci_nvme_aer(nvme_cid(req
));
2180 if (n
->outstanding_aers
> n
->params
.aerl
) {
2181 trace_pci_nvme_aer_aerl_exceeded();
2182 return NVME_AER_LIMIT_EXCEEDED
;
2185 n
->aer_reqs
[n
->outstanding_aers
] = req
;
2186 n
->outstanding_aers
++;
2188 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
2189 nvme_process_aers(n
);
2192 return NVME_NO_COMPLETE
;
2195 static uint16_t nvme_admin_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
2197 trace_pci_nvme_admin_cmd(nvme_cid(req
), nvme_sqid(req
), req
->cmd
.opcode
,
2198 nvme_adm_opc_str(req
->cmd
.opcode
));
2200 switch (req
->cmd
.opcode
) {
2201 case NVME_ADM_CMD_DELETE_SQ
:
2202 return nvme_del_sq(n
, req
);
2203 case NVME_ADM_CMD_CREATE_SQ
:
2204 return nvme_create_sq(n
, req
);
2205 case NVME_ADM_CMD_GET_LOG_PAGE
:
2206 return nvme_get_log(n
, req
);
2207 case NVME_ADM_CMD_DELETE_CQ
:
2208 return nvme_del_cq(n
, req
);
2209 case NVME_ADM_CMD_CREATE_CQ
:
2210 return nvme_create_cq(n
, req
);
2211 case NVME_ADM_CMD_IDENTIFY
:
2212 return nvme_identify(n
, req
);
2213 case NVME_ADM_CMD_ABORT
:
2214 return nvme_abort(n
, req
);
2215 case NVME_ADM_CMD_SET_FEATURES
:
2216 return nvme_set_feature(n
, req
);
2217 case NVME_ADM_CMD_GET_FEATURES
:
2218 return nvme_get_feature(n
, req
);
2219 case NVME_ADM_CMD_ASYNC_EV_REQ
:
2220 return nvme_aer(n
, req
);
2222 trace_pci_nvme_err_invalid_admin_opc(req
->cmd
.opcode
);
2223 return NVME_INVALID_OPCODE
| NVME_DNR
;
2227 static void nvme_process_sq(void *opaque
)
2229 NvmeSQueue
*sq
= opaque
;
2230 NvmeCtrl
*n
= sq
->ctrl
;
2231 NvmeCQueue
*cq
= n
->cq
[sq
->cqid
];
2238 while (!(nvme_sq_empty(sq
) || QTAILQ_EMPTY(&sq
->req_list
))) {
2239 addr
= sq
->dma_addr
+ sq
->head
* n
->sqe_size
;
2240 if (nvme_addr_read(n
, addr
, (void *)&cmd
, sizeof(cmd
))) {
2241 trace_pci_nvme_err_addr_read(addr
);
2242 trace_pci_nvme_err_cfs();
2243 n
->bar
.csts
= NVME_CSTS_FAILED
;
2246 nvme_inc_sq_head(sq
);
2248 req
= QTAILQ_FIRST(&sq
->req_list
);
2249 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
2250 QTAILQ_INSERT_TAIL(&sq
->out_req_list
, req
, entry
);
2251 nvme_req_clear(req
);
2252 req
->cqe
.cid
= cmd
.cid
;
2253 memcpy(&req
->cmd
, &cmd
, sizeof(NvmeCmd
));
2255 status
= sq
->sqid
? nvme_io_cmd(n
, req
) :
2256 nvme_admin_cmd(n
, req
);
2257 if (status
!= NVME_NO_COMPLETE
) {
2258 req
->status
= status
;
2259 nvme_enqueue_req_completion(cq
, req
);
2264 static void nvme_clear_ctrl(NvmeCtrl
*n
)
2269 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2278 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
2279 if (n
->sq
[i
] != NULL
) {
2280 nvme_free_sq(n
->sq
[i
], n
);
2283 for (i
= 0; i
< n
->params
.max_ioqpairs
+ 1; i
++) {
2284 if (n
->cq
[i
] != NULL
) {
2285 nvme_free_cq(n
->cq
[i
], n
);
2289 while (!QTAILQ_EMPTY(&n
->aer_queue
)) {
2290 NvmeAsyncEvent
*event
= QTAILQ_FIRST(&n
->aer_queue
);
2291 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
2296 n
->outstanding_aers
= 0;
2297 n
->qs_created
= false;
2299 for (i
= 1; i
<= n
->num_namespaces
; i
++) {
2311 static int nvme_start_ctrl(NvmeCtrl
*n
)
2313 uint32_t page_bits
= NVME_CC_MPS(n
->bar
.cc
) + 12;
2314 uint32_t page_size
= 1 << page_bits
;
2316 if (unlikely(n
->cq
[0])) {
2317 trace_pci_nvme_err_startfail_cq();
2320 if (unlikely(n
->sq
[0])) {
2321 trace_pci_nvme_err_startfail_sq();
2324 if (unlikely(!n
->bar
.asq
)) {
2325 trace_pci_nvme_err_startfail_nbarasq();
2328 if (unlikely(!n
->bar
.acq
)) {
2329 trace_pci_nvme_err_startfail_nbaracq();
2332 if (unlikely(n
->bar
.asq
& (page_size
- 1))) {
2333 trace_pci_nvme_err_startfail_asq_misaligned(n
->bar
.asq
);
2336 if (unlikely(n
->bar
.acq
& (page_size
- 1))) {
2337 trace_pci_nvme_err_startfail_acq_misaligned(n
->bar
.acq
);
2340 if (unlikely(!(NVME_CAP_CSS(n
->bar
.cap
) & (1 << NVME_CC_CSS(n
->bar
.cc
))))) {
2341 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n
->bar
.cc
));
2344 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) <
2345 NVME_CAP_MPSMIN(n
->bar
.cap
))) {
2346 trace_pci_nvme_err_startfail_page_too_small(
2347 NVME_CC_MPS(n
->bar
.cc
),
2348 NVME_CAP_MPSMIN(n
->bar
.cap
));
2351 if (unlikely(NVME_CC_MPS(n
->bar
.cc
) >
2352 NVME_CAP_MPSMAX(n
->bar
.cap
))) {
2353 trace_pci_nvme_err_startfail_page_too_large(
2354 NVME_CC_MPS(n
->bar
.cc
),
2355 NVME_CAP_MPSMAX(n
->bar
.cap
));
2358 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) <
2359 NVME_CTRL_CQES_MIN(n
->id_ctrl
.cqes
))) {
2360 trace_pci_nvme_err_startfail_cqent_too_small(
2361 NVME_CC_IOCQES(n
->bar
.cc
),
2362 NVME_CTRL_CQES_MIN(n
->bar
.cap
));
2365 if (unlikely(NVME_CC_IOCQES(n
->bar
.cc
) >
2366 NVME_CTRL_CQES_MAX(n
->id_ctrl
.cqes
))) {
2367 trace_pci_nvme_err_startfail_cqent_too_large(
2368 NVME_CC_IOCQES(n
->bar
.cc
),
2369 NVME_CTRL_CQES_MAX(n
->bar
.cap
));
2372 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) <
2373 NVME_CTRL_SQES_MIN(n
->id_ctrl
.sqes
))) {
2374 trace_pci_nvme_err_startfail_sqent_too_small(
2375 NVME_CC_IOSQES(n
->bar
.cc
),
2376 NVME_CTRL_SQES_MIN(n
->bar
.cap
));
2379 if (unlikely(NVME_CC_IOSQES(n
->bar
.cc
) >
2380 NVME_CTRL_SQES_MAX(n
->id_ctrl
.sqes
))) {
2381 trace_pci_nvme_err_startfail_sqent_too_large(
2382 NVME_CC_IOSQES(n
->bar
.cc
),
2383 NVME_CTRL_SQES_MAX(n
->bar
.cap
));
2386 if (unlikely(!NVME_AQA_ASQS(n
->bar
.aqa
))) {
2387 trace_pci_nvme_err_startfail_asqent_sz_zero();
2390 if (unlikely(!NVME_AQA_ACQS(n
->bar
.aqa
))) {
2391 trace_pci_nvme_err_startfail_acqent_sz_zero();
2395 n
->page_bits
= page_bits
;
2396 n
->page_size
= page_size
;
2397 n
->max_prp_ents
= n
->page_size
/ sizeof(uint64_t);
2398 n
->cqe_size
= 1 << NVME_CC_IOCQES(n
->bar
.cc
);
2399 n
->sqe_size
= 1 << NVME_CC_IOSQES(n
->bar
.cc
);
2400 nvme_init_cq(&n
->admin_cq
, n
, n
->bar
.acq
, 0, 0,
2401 NVME_AQA_ACQS(n
->bar
.aqa
) + 1, 1);
2402 nvme_init_sq(&n
->admin_sq
, n
, n
->bar
.asq
, 0, 0,
2403 NVME_AQA_ASQS(n
->bar
.aqa
) + 1);
2405 nvme_set_timestamp(n
, 0ULL);
2407 QTAILQ_INIT(&n
->aer_queue
);
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }

        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }

        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }

        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set, a read from PMRSTS should ensure that
         * prior writes made it to persistent media.
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
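/*
 * Doorbell layout assumed above (illustrative, with CAP.DSTRD = 0): the
 * doorbell for submission queue y lives at 0x1000 + (2 * y) * 4 and the
 * matching completion queue doorbell at 0x1000 + (2 * y + 1) * 4. That is
 * why bit 2 of (addr - 0x1000) distinguishes SQ tail from CQ head writes,
 * and why the queue id is recovered with ">> 3" (8 bytes per SQ/CQ doorbell
 * pair). For example, a write to offset 0x100c is the CQ head doorbell of
 * queue 1.
 */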
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}
static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    stn_le_p(&n->cmbuf[addr], size, data);
}
static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    return ldn_le_p(&n->cmbuf[addr], size);
}
static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
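/*
 * The CMB is backed by the plain n->cmbuf allocation, so guest accesses of
 * 1 to 8 bytes are just little-endian loads and stores at the given offset
 * (ldn_le_p/stn_le_p above). As a sketch, a 4-byte guest read at CMB offset
 * 0x100 resolves to roughly:
 *
 *   val = ldl_le_p(&n->cmbuf[0x100]);
 *
 * i.e. the fixed-size helper that ldn_le_p(..., 4) dispatches to.
 */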
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk) {
        warn_report("drive property is deprecated; "
                    "please use an nvme-ns device instead");
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = NVME_MAX_NAMESPACES;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
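/*
 * Rough sizing example for reg_size above (numbers are illustrative): with
 * the default max_ioqpairs = 64 there are 65 queue pairs including the
 * admin pair, i.e. 2 * 65 * NVME_DB_SIZE = 520 bytes of doorbells after the
 * fixed register file, and pow2ceil() then rounds the total up to the next
 * power of two so the MMIO region keeps a valid BAR size.
 */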
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    uint32_t nsid = nvme_nsid(ns);

    if (nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (!nsid) {
        for (int i = 1; i <= n->num_namespaces; i++) {
            if (!nvme_ns(n, i)) {
                nsid = ns->params.nsid = i;
                break;
            }
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return -1;
        }
    } else {
        if (n->namespaces[nsid - 1]) {
            error_setg(errp, "namespace id '%d' is already in use", nsid);
            return -1;
        }
    }

    trace_pci_nvme_register_namespace(nsid);

    n->namespaces[nsid - 1] = ns;

    return 0;
}
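/*
 * Namespace id assignment, in short: an explicit nsid from the nvme-ns
 * device is validated against NVME_MAX_NAMESPACES and against collisions,
 * while nsid = 0 asks the controller to pick the first free slot. For
 * example, attaching three nvme-ns devices without nsid= set yields
 * namespaces 1, 2 and 3 in attach order.
 */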
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
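/*
 * CMBSZ example (illustrative): SZU = 2 selects a 1 MiB size unit, since
 * the granularity is 4 KiB << (4 * SZU), so SZ is simply cmb_size_mb and
 * NVME_CMBSZ_GETSIZE() yields cmb_size_mb MiB for both the cmbuf allocation
 * and the "nvme-cmb" memory region registered on BAR2 (NVME_CMB_BIR).
 */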
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}
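/*
 * Resulting BAR layout (assuming the definitions above): BAR0 is the 64-bit
 * "nvme" register and doorbell region, the MSI-X table and PBA live in
 * their own exclusive BAR 4, and BAR2 carries either the CMB or the PMR
 * backend, which is why only one of the two is initialized in the else-if
 * above.
 */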
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that, though it
     * is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE);

    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
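/*
 * A few of the identify/CAP encodings above, decoded (illustrative):
 * sqes = 0x66 and cqes = 0x44 advertise fixed 2^6 = 64 byte submission and
 * 2^4 = 16 byte completion queue entries; CAP.MQES = 0x7ff is 0's based,
 * i.e. up to 2048 entries per I/O queue; CAP.TO = 0xf is in 500 ms units,
 * giving a 7.5 s worst-case ready timeout; and CAP.MPSMAX = 4 allows memory
 * page sizes up to 2^(12 + 4) = 64 KiB.
 */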
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(n, ns, errp)) {
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_END_OF_LIST(),
};
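/*
 * Property defaults above, spelled out (illustrative): max_ioqpairs = 64
 * with msix_qsize = 65 provides one MSI-X vector per I/O queue pair plus
 * the admin queue, aerl = 3 allows four outstanding AER commands (0's
 * based), and mdts = 7 limits transfers to 2^7 minimum-sized pages, i.e.
 * 512 KiB with a 4 KiB minimum memory page size.
 */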
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};
static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    if (s->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &s->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }
}
static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};
static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};
static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)