/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,zoned.append_size_limit=<N[optional]> \
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>
 *
 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
 * to be at offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By
 * default, the device will use the "v1.4 CMB scheme" - use the `legacy-cmb`
 * parameter to always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * Enabling PMR emulation can be achieved by pointing to a memory-backend-file.
 * For example:
 *
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `zoned.append_size_limit`
 *   The maximum I/O size in bytes that is allowed in a Zone Append command.
 *   The default is 128KiB. Since this value is maintained internally as
 *   ZASL = log2(<maximum append size> / <page size>), some values assigned
 *   to this property may be rounded down and result in a lower maximum ZA
 *   data size being in effect. By setting this property to 0, users can make
 *   ZASL equal to MDTS. This property only affects zoned namespaces.
 *
 * Setting `zoned` to true selects the Zoned Command Set at the namespace
 * device level. In this case, the following namespace properties are
 * available to configure zoned operation:
 *     zoned.zone_size=<zone size in bytes, default: 128MiB>
 *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *         The value 0 (default) forces the zone capacity to be the same as
 *         the zone size. The value of this property may not exceed zone size.
 *
 *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *         This value needs to be specified in 64B units. If it is zero,
 *         namespace(s) will not support zone descriptor extensions.
 *
 *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently active zones.
 *
 *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently open zones.
 *
 *     zoned.cross_read=<enable RAZB, default: false>
 *         Setting this property to true enables Read Across Zone Boundaries.
 *
 * An illustrative invocation using these properties is sketched in the
 * example comment below.
 */
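
/*
 * Example (illustrative only, not taken from the documentation above): a
 * single zoned namespace attached to the controller. The image path, serial
 * and all numeric values are placeholder assumptions, not recommendations.
 *
 *     -drive file=zns.img,if=none,id=znsdrive
 *     -device nvme,serial=deadbeef,id=nvme0,zoned.append_size_limit=131072
 *     -device nvme-ns,drive=znsdrive,bus=nvme0,nsid=1,zoned=true, \
 *             zoned.zone_size=64M,zoned.zone_capacity=62M, \
 *             zoned.max_open=16,zoned.max_active=32,zoned.cross_read=true
 */
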
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_SPEC_VER 0x00010400
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 4
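/*
 * Default composite temperature and the warning/critical thresholds, in
 * Kelvin as mandated by the spec (0x143 == 323 K, roughly 50 degrees C).
 */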
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
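/*
 * NVME_GUEST_ERR reports a guest-triggered protocol violation: it fires the
 * named trace event and also logs through LOG_GUEST_ERROR, so the message is
 * visible when QEMU is run with "-d guest_errors".
 */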
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};
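/*
 * Per-feature capability flags: NVME_FEAT_CAP_CHANGE marks a feature as
 * changeable via Set Features, NVME_FEAT_CAP_NS marks it as namespace
 * specific.
 */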
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
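/*
 * Commands Supported and Effects entries, indexed by opcode.
 * NVME_CMD_EFF_CSUPP flags the opcode as supported; NVME_CMD_EFF_LBCC flags
 * commands that may change logical block content.
 */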
static const uint32_t nvme_cse_acs[256] = {
    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_none[256];

static const uint32_t nvme_cse_iocs_nvm[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_zoned[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
201 static void nvme_assign_zone_state(NvmeNamespace
*ns
, NvmeZone
*zone
,
204 if (QTAILQ_IN_USE(zone
, entry
)) {
205 switch (nvme_get_zone_state(zone
)) {
206 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
207 QTAILQ_REMOVE(&ns
->exp_open_zones
, zone
, entry
);
209 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
210 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
212 case NVME_ZONE_STATE_CLOSED
:
213 QTAILQ_REMOVE(&ns
->closed_zones
, zone
, entry
);
215 case NVME_ZONE_STATE_FULL
:
216 QTAILQ_REMOVE(&ns
->full_zones
, zone
, entry
);
222 nvme_set_zone_state(zone
, state
);
225 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
226 QTAILQ_INSERT_TAIL(&ns
->exp_open_zones
, zone
, entry
);
228 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
229 QTAILQ_INSERT_TAIL(&ns
->imp_open_zones
, zone
, entry
);
231 case NVME_ZONE_STATE_CLOSED
:
232 QTAILQ_INSERT_TAIL(&ns
->closed_zones
, zone
, entry
);
234 case NVME_ZONE_STATE_FULL
:
235 QTAILQ_INSERT_TAIL(&ns
->full_zones
, zone
, entry
);
236 case NVME_ZONE_STATE_READ_ONLY
:
/*
 * Check if we can open a zone without exceeding open/active limits.
 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
 */
static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
{
    if (ns->params.max_active_zones != 0 &&
        ns->nr_active_zones + act > ns->params.max_active_zones) {
        trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
        return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
    }

    if (ns->params.max_open_zones != 0 &&
        ns->nr_open_zones + opn > ns->params.max_open_zones) {
        trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
        return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
    }

    return NVME_SUCCESS;
}
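/*
 * Helpers that classify and translate guest physical addresses falling inside
 * the Controller Memory Buffer (CMB) or the Persistent Memory Region (PMR).
 */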
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr lo, hi;

    if (!n->cmb.cmse) {
        return false;
    }

    lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
    hi = lo + int128_get64(n->cmb.mem.size);

    return addr >= lo && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;

    return &n->cmb.buf[addr - base];
}

static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi;

    if (!n->pmr.cmse) {
        return false;
    }

    hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);

    return addr >= n->pmr.cba && addr < hi;
}

static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
{
    return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
}

static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(buf, nvme_addr_to_pmr(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}

static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
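/* advance the CQ tail; the phase tag is inverted each time the tail wraps */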
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
360 static void nvme_irq_check(NvmeCtrl
*n
)
362 if (msix_enabled(&(n
->parent_obj
))) {
365 if (~n
->bar
.intms
& n
->irq_status
) {
366 pci_irq_assert(&n
->parent_obj
);
368 pci_irq_deassert(&n
->parent_obj
);
372 static void nvme_irq_assert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
374 if (cq
->irq_enabled
) {
375 if (msix_enabled(&(n
->parent_obj
))) {
376 trace_pci_nvme_irq_msix(cq
->vector
);
377 msix_notify(&(n
->parent_obj
), cq
->vector
);
379 trace_pci_nvme_irq_pin();
380 assert(cq
->vector
< 32);
381 n
->irq_status
|= 1 << cq
->vector
;
385 trace_pci_nvme_irq_masked();
389 static void nvme_irq_deassert(NvmeCtrl
*n
, NvmeCQueue
*cq
)
391 if (cq
->irq_enabled
) {
392 if (msix_enabled(&(n
->parent_obj
))) {
395 assert(cq
->vector
< 32);
396 n
->irq_status
&= ~(1 << cq
->vector
);
402 static void nvme_req_clear(NvmeRequest
*req
)
406 memset(&req
->cqe
, 0x0, sizeof(req
->cqe
));
407 req
->status
= NVME_SUCCESS
;
410 static void nvme_req_exit(NvmeRequest
*req
)
413 qemu_sglist_destroy(&req
->qsg
);
417 qemu_iovec_destroy(&req
->iov
);
421 static uint16_t nvme_map_addr_cmb(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
428 trace_pci_nvme_map_addr_cmb(addr
, len
);
430 if (!nvme_addr_is_cmb(n
, addr
) || !nvme_addr_is_cmb(n
, addr
+ len
- 1)) {
431 return NVME_DATA_TRAS_ERROR
;
434 qemu_iovec_add(iov
, nvme_addr_to_cmb(n
, addr
), len
);
439 static uint16_t nvme_map_addr_pmr(NvmeCtrl
*n
, QEMUIOVector
*iov
, hwaddr addr
,
446 if (!nvme_addr_is_pmr(n
, addr
) || !nvme_addr_is_pmr(n
, addr
+ len
- 1)) {
447 return NVME_DATA_TRAS_ERROR
;
450 qemu_iovec_add(iov
, nvme_addr_to_pmr(n
, addr
), len
);
455 static uint16_t nvme_map_addr(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
456 hwaddr addr
, size_t len
)
458 bool cmb
= false, pmr
= false;
464 trace_pci_nvme_map_addr(addr
, len
);
466 if (nvme_addr_is_cmb(n
, addr
)) {
468 } else if (nvme_addr_is_pmr(n
, addr
)) {
473 if (qsg
&& qsg
->sg
) {
474 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
480 qemu_iovec_init(iov
, 1);
484 return nvme_map_addr_cmb(n
, iov
, addr
, len
);
486 return nvme_map_addr_pmr(n
, iov
, addr
, len
);
490 if (iov
&& iov
->iov
) {
491 return NVME_INVALID_USE_OF_CMB
| NVME_DNR
;
497 pci_dma_sglist_init(qsg
, &n
->parent_obj
, 1);
500 qemu_sglist_add(qsg
, addr
, len
);
505 static uint16_t nvme_map_prp(NvmeCtrl
*n
, uint64_t prp1
, uint64_t prp2
,
506 uint32_t len
, NvmeRequest
*req
)
508 hwaddr trans_len
= n
->page_size
- (prp1
% n
->page_size
);
509 trans_len
= MIN(len
, trans_len
);
510 int num_prps
= (len
>> n
->page_bits
) + 1;
514 QEMUSGList
*qsg
= &req
->qsg
;
515 QEMUIOVector
*iov
= &req
->iov
;
517 trace_pci_nvme_map_prp(trans_len
, len
, prp1
, prp2
, num_prps
);
519 if (nvme_addr_is_cmb(n
, prp1
) || (nvme_addr_is_pmr(n
, prp1
))) {
520 qemu_iovec_init(iov
, num_prps
);
522 pci_dma_sglist_init(qsg
, &n
->parent_obj
, num_prps
);
525 status
= nvme_map_addr(n
, qsg
, iov
, prp1
, trans_len
);
532 if (len
> n
->page_size
) {
533 uint64_t prp_list
[n
->max_prp_ents
];
534 uint32_t nents
, prp_trans
;
537 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
538 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
539 ret
= nvme_addr_read(n
, prp2
, (void *)prp_list
, prp_trans
);
541 trace_pci_nvme_err_addr_read(prp2
);
542 return NVME_DATA_TRAS_ERROR
;
545 uint64_t prp_ent
= le64_to_cpu(prp_list
[i
]);
547 if (i
== n
->max_prp_ents
- 1 && len
> n
->page_size
) {
548 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
549 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
550 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
554 nents
= (len
+ n
->page_size
- 1) >> n
->page_bits
;
555 prp_trans
= MIN(n
->max_prp_ents
, nents
) * sizeof(uint64_t);
556 ret
= nvme_addr_read(n
, prp_ent
, (void *)prp_list
,
559 trace_pci_nvme_err_addr_read(prp_ent
);
560 return NVME_DATA_TRAS_ERROR
;
562 prp_ent
= le64_to_cpu(prp_list
[i
]);
565 if (unlikely(prp_ent
& (n
->page_size
- 1))) {
566 trace_pci_nvme_err_invalid_prplist_ent(prp_ent
);
567 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
570 trans_len
= MIN(len
, n
->page_size
);
571 status
= nvme_map_addr(n
, qsg
, iov
, prp_ent
, trans_len
);
580 if (unlikely(prp2
& (n
->page_size
- 1))) {
581 trace_pci_nvme_err_invalid_prp2_align(prp2
);
582 return NVME_INVALID_PRP_OFFSET
| NVME_DNR
;
584 status
= nvme_map_addr(n
, qsg
, iov
, prp2
, len
);
 * Map 'nsgld' data descriptors from 'segment'. The function subtracts the
 * number of bytes mapped from *len.
598 static uint16_t nvme_map_sgl_data(NvmeCtrl
*n
, QEMUSGList
*qsg
,
600 NvmeSglDescriptor
*segment
, uint64_t nsgld
,
601 size_t *len
, NvmeRequest
*req
)
603 dma_addr_t addr
, trans_len
;
607 for (int i
= 0; i
< nsgld
; i
++) {
608 uint8_t type
= NVME_SGL_TYPE(segment
[i
].type
);
611 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
612 if (req
->cmd
.opcode
== NVME_CMD_WRITE
) {
615 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
617 case NVME_SGL_DESCR_TYPE_SEGMENT
:
618 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
619 return NVME_INVALID_NUM_SGL_DESCRS
| NVME_DNR
;
621 return NVME_SGL_DESCR_TYPE_INVALID
| NVME_DNR
;
624 dlen
= le32_to_cpu(segment
[i
].len
);
632 * All data has been mapped, but the SGL contains additional
633 * segments and/or descriptors. The controller might accept
634 * ignoring the rest of the SGL.
636 uint32_t sgls
= le32_to_cpu(n
->id_ctrl
.sgls
);
637 if (sgls
& NVME_CTRL_SGLS_EXCESS_LENGTH
) {
641 trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req
));
642 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
645 trans_len
= MIN(*len
, dlen
);
647 if (type
== NVME_SGL_DESCR_TYPE_BIT_BUCKET
) {
651 addr
= le64_to_cpu(segment
[i
].addr
);
653 if (UINT64_MAX
- addr
< dlen
) {
654 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
657 status
= nvme_map_addr(n
, qsg
, iov
, addr
, trans_len
);
669 static uint16_t nvme_map_sgl(NvmeCtrl
*n
, QEMUSGList
*qsg
, QEMUIOVector
*iov
,
670 NvmeSglDescriptor sgl
, size_t len
,
674 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
675 * dynamically allocating a potentially huge SGL. The spec allows the SGL
676 * to be larger (as in number of bytes required to describe the SGL
677 * descriptors and segment chain) than the command transfer size, so it is
678 * not bounded by MDTS.
680 const int SEG_CHUNK_SIZE
= 256;
682 NvmeSglDescriptor segment
[SEG_CHUNK_SIZE
], *sgld
, *last_sgld
;
690 addr
= le64_to_cpu(sgl
.addr
);
692 trace_pci_nvme_map_sgl(nvme_cid(req
), NVME_SGL_TYPE(sgl
.type
), len
);
695 * If the entire transfer can be described with a single data block it can
696 * be mapped directly.
698 if (NVME_SGL_TYPE(sgl
.type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
699 status
= nvme_map_sgl_data(n
, qsg
, iov
, sgld
, 1, &len
, req
);
708 switch (NVME_SGL_TYPE(sgld
->type
)) {
709 case NVME_SGL_DESCR_TYPE_SEGMENT
:
710 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
713 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
716 seg_len
= le32_to_cpu(sgld
->len
);
718 /* check the length of the (Last) Segment descriptor */
719 if ((!seg_len
|| seg_len
& 0xf) &&
720 (NVME_SGL_TYPE(sgld
->type
) != NVME_SGL_DESCR_TYPE_BIT_BUCKET
)) {
721 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
724 if (UINT64_MAX
- addr
< seg_len
) {
725 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
728 nsgld
= seg_len
/ sizeof(NvmeSglDescriptor
);
730 while (nsgld
> SEG_CHUNK_SIZE
) {
731 if (nvme_addr_read(n
, addr
, segment
, sizeof(segment
))) {
732 trace_pci_nvme_err_addr_read(addr
);
733 status
= NVME_DATA_TRAS_ERROR
;
737 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, SEG_CHUNK_SIZE
,
743 nsgld
-= SEG_CHUNK_SIZE
;
744 addr
+= SEG_CHUNK_SIZE
* sizeof(NvmeSglDescriptor
);
747 ret
= nvme_addr_read(n
, addr
, segment
, nsgld
*
748 sizeof(NvmeSglDescriptor
));
750 trace_pci_nvme_err_addr_read(addr
);
751 status
= NVME_DATA_TRAS_ERROR
;
755 last_sgld
= &segment
[nsgld
- 1];
758 * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
761 switch (NVME_SGL_TYPE(last_sgld
->type
)) {
762 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
763 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
764 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
, &len
, req
);
776 * If the last descriptor was not a Data Block or Bit Bucket, then the
777 * current segment must not be a Last Segment.
779 if (NVME_SGL_TYPE(sgld
->type
) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT
) {
780 status
= NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
785 addr
= le64_to_cpu(sgld
->addr
);
788 * Do not map the last descriptor; it will be a Segment or Last Segment
789 * descriptor and is handled by the next iteration.
791 status
= nvme_map_sgl_data(n
, qsg
, iov
, segment
, nsgld
- 1, &len
, req
);
798 /* if there is any residual left in len, the SGL was too short */
800 status
= NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
808 qemu_iovec_destroy(iov
);
812 qemu_sglist_destroy(qsg
);
818 static uint16_t nvme_map_dptr(NvmeCtrl
*n
, size_t len
, NvmeRequest
*req
)
822 switch (NVME_CMD_FLAGS_PSDT(req
->cmd
.flags
)) {
824 prp1
= le64_to_cpu(req
->cmd
.dptr
.prp1
);
825 prp2
= le64_to_cpu(req
->cmd
.dptr
.prp2
);
827 return nvme_map_prp(n
, prp1
, prp2
, len
, req
);
828 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
829 case NVME_PSDT_SGL_MPTR_SGL
:
830 /* SGLs shall not be used for Admin commands in NVMe over PCIe */
831 if (!req
->sq
->sqid
) {
832 return NVME_INVALID_FIELD
| NVME_DNR
;
835 return nvme_map_sgl(n
, &req
->qsg
, &req
->iov
, req
->cmd
.dptr
.sgl
, len
,
838 return NVME_INVALID_FIELD
;
842 static uint16_t nvme_dma(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
843 DMADirection dir
, NvmeRequest
*req
)
845 uint16_t status
= NVME_SUCCESS
;
847 status
= nvme_map_dptr(n
, len
, req
);
852 /* assert that only one of qsg and iov carries data */
853 assert((req
->qsg
.nsg
> 0) != (req
->iov
.niov
> 0));
855 if (req
->qsg
.nsg
> 0) {
858 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
859 residual
= dma_buf_write(ptr
, len
, &req
->qsg
);
861 residual
= dma_buf_read(ptr
, len
, &req
->qsg
);
864 if (unlikely(residual
)) {
865 trace_pci_nvme_err_invalid_dma();
866 status
= NVME_INVALID_FIELD
| NVME_DNR
;
871 if (dir
== DMA_DIRECTION_TO_DEVICE
) {
872 bytes
= qemu_iovec_to_buf(&req
->iov
, 0, ptr
, len
);
874 bytes
= qemu_iovec_from_buf(&req
->iov
, 0, ptr
, len
);
877 if (unlikely(bytes
!= len
)) {
878 trace_pci_nvme_err_invalid_dma();
879 status
= NVME_INVALID_FIELD
| NVME_DNR
;
886 static void nvme_post_cqes(void *opaque
)
888 NvmeCQueue
*cq
= opaque
;
889 NvmeCtrl
*n
= cq
->ctrl
;
890 NvmeRequest
*req
, *next
;
893 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
897 if (nvme_cq_full(cq
)) {
902 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
903 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
904 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
905 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
906 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
909 trace_pci_nvme_err_addr_write(addr
);
910 trace_pci_nvme_err_cfs();
911 n
->bar
.csts
= NVME_CSTS_FAILED
;
914 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
915 nvme_inc_cq_tail(cq
);
917 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
919 if (cq
->tail
!= cq
->head
) {
920 nvme_irq_assert(n
, cq
);
924 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
926 assert(cq
->cqid
== req
->sq
->cqid
);
927 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
931 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
932 req
->status
, req
->cmd
.opcode
);
935 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
936 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
937 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
940 static void nvme_process_aers(void *opaque
)
942 NvmeCtrl
*n
= opaque
;
943 NvmeAsyncEvent
*event
, *next
;
945 trace_pci_nvme_process_aers(n
->aer_queued
);
947 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
949 NvmeAerResult
*result
;
951 /* can't post cqe if there is nothing to complete */
952 if (!n
->outstanding_aers
) {
953 trace_pci_nvme_no_outstanding_aers();
957 /* ignore if masked (cqe posted, but event not cleared) */
958 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
959 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
963 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
966 n
->aer_mask
|= 1 << event
->result
.event_type
;
967 n
->outstanding_aers
--;
969 req
= n
->aer_reqs
[n
->outstanding_aers
];
971 result
= (NvmeAerResult
*) &req
->cqe
.result
;
972 result
->event_type
= event
->result
.event_type
;
973 result
->event_info
= event
->result
.event_info
;
974 result
->log_page
= event
->result
.log_page
;
977 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
980 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
984 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
985 uint8_t event_info
, uint8_t log_page
)
987 NvmeAsyncEvent
*event
;
989 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
991 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
992 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
996 event
= g_new(NvmeAsyncEvent
, 1);
997 event
->result
= (NvmeAerResult
) {
998 .event_type
= event_type
,
999 .event_info
= event_info
,
1000 .log_page
= log_page
,
1003 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
1006 nvme_process_aers(n
);
1009 static void nvme_smart_event(NvmeCtrl
*n
, uint8_t event
)
    /* Ref SPEC <Asynchronous Event Information - SMART / Health Status> */
1014 if (!(NVME_AEC_SMART(n
->features
.async_config
) & event
)) {
1019 case NVME_SMART_SPARE
:
1020 aer_info
= NVME_AER_INFO_SMART_SPARE_THRESH
;
1022 case NVME_SMART_TEMPERATURE
:
1023 aer_info
= NVME_AER_INFO_SMART_TEMP_THRESH
;
1025 case NVME_SMART_RELIABILITY
:
1026 case NVME_SMART_MEDIA_READ_ONLY
:
1027 case NVME_SMART_FAILED_VOLATILE_MEDIA
:
1028 case NVME_SMART_PMR_UNRELIABLE
:
1029 aer_info
= NVME_AER_INFO_SMART_RELIABILITY
;
1035 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
, aer_info
, NVME_LOG_SMART_INFO
);
1038 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
1040 n
->aer_mask
&= ~(1 << event_type
);
1041 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1042 nvme_process_aers(n
);
1046 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
1048 uint8_t mdts
= n
->params
.mdts
;
1050 if (mdts
&& len
> n
->page_size
<< mdts
) {
1051 return NVME_INVALID_FIELD
| NVME_DNR
;
1054 return NVME_SUCCESS
;
1057 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
1060 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
1062 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
1063 return NVME_LBA_RANGE
| NVME_DNR
;
1066 return NVME_SUCCESS
;
1069 static uint16_t nvme_check_dulbe(NvmeNamespace
*ns
, uint64_t slba
,
1072 BlockDriverState
*bs
= blk_bs(ns
->blkconf
.blk
);
1074 int64_t pnum
= 0, bytes
= nvme_l2b(ns
, nlb
);
1075 int64_t offset
= nvme_l2b(ns
, slba
);
1079 Error
*local_err
= NULL
;
     * `pnum` holds the number of bytes after offset that share the same
     * allocation status as the byte at offset. If `pnum` is different from
     * `bytes`, we should check the allocation status of the next range and
     * continue this until all bytes have been checked.
1090 ret
= bdrv_block_status(bs
, offset
, bytes
, &pnum
, NULL
, NULL
);
1092 error_setg_errno(&local_err
, -ret
, "unable to get block status");
1093 error_report_err(local_err
);
1095 return NVME_INTERNAL_DEV_ERROR
;
1098 zeroed
= !!(ret
& BDRV_BLOCK_ZERO
);
1100 trace_pci_nvme_block_status(offset
, bytes
, pnum
, ret
, zeroed
);
1107 } while (pnum
!= bytes
);
1109 return NVME_SUCCESS
;
1112 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
1114 uint16_t status
= NVME_SUCCESS
;
1115 Error
*local_err
= NULL
;
1117 switch (req
->cmd
.opcode
) {
1119 status
= NVME_UNRECOVERED_READ
;
1121 case NVME_CMD_FLUSH
:
1122 case NVME_CMD_WRITE
:
1123 case NVME_CMD_WRITE_ZEROES
:
1124 case NVME_CMD_ZONE_APPEND
:
1125 status
= NVME_WRITE_FAULT
;
1128 status
= NVME_INTERNAL_DEV_ERROR
;
1132 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(ret
), status
);
1134 error_setg_errno(&local_err
, -ret
, "aio failed");
1135 error_report_err(local_err
);
1138 * Set the command status code to the first encountered error but allow a
1139 * subsequent Internal Device Error to trump it.
1141 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
1145 req
->status
= status
;
1148 static inline uint32_t nvme_zone_idx(NvmeNamespace
*ns
, uint64_t slba
)
1150 return ns
->zone_size_log2
> 0 ? slba
>> ns
->zone_size_log2
:
1151 slba
/ ns
->zone_size
;
1154 static inline NvmeZone
*nvme_get_zone_by_slba(NvmeNamespace
*ns
, uint64_t slba
)
1156 uint32_t zone_idx
= nvme_zone_idx(ns
, slba
);
1158 assert(zone_idx
< ns
->num_zones
);
1159 return &ns
->zone_array
[zone_idx
];
1162 static uint16_t nvme_check_zone_state_for_write(NvmeZone
*zone
)
1164 uint64_t zslba
= zone
->d
.zslba
;
1166 switch (nvme_get_zone_state(zone
)) {
1167 case NVME_ZONE_STATE_EMPTY
:
1168 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1169 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1170 case NVME_ZONE_STATE_CLOSED
:
1171 return NVME_SUCCESS
;
1172 case NVME_ZONE_STATE_FULL
:
1173 trace_pci_nvme_err_zone_is_full(zslba
);
1174 return NVME_ZONE_FULL
;
1175 case NVME_ZONE_STATE_OFFLINE
:
1176 trace_pci_nvme_err_zone_is_offline(zslba
);
1177 return NVME_ZONE_OFFLINE
;
1178 case NVME_ZONE_STATE_READ_ONLY
:
1179 trace_pci_nvme_err_zone_is_read_only(zslba
);
1180 return NVME_ZONE_READ_ONLY
;
1185 return NVME_INTERNAL_DEV_ERROR
;
1188 static uint16_t nvme_check_zone_write(NvmeCtrl
*n
, NvmeNamespace
*ns
,
1189 NvmeZone
*zone
, uint64_t slba
,
1192 uint64_t zcap
= nvme_zone_wr_boundary(zone
);
1195 status
= nvme_check_zone_state_for_write(zone
);
1200 if (unlikely(slba
!= zone
->w_ptr
)) {
1201 trace_pci_nvme_err_write_not_at_wp(slba
, zone
->d
.zslba
, zone
->w_ptr
);
1202 return NVME_ZONE_INVALID_WRITE
;
1205 if (unlikely((slba
+ nlb
) > zcap
)) {
1206 trace_pci_nvme_err_zone_boundary(slba
, nlb
, zcap
);
1207 return NVME_ZONE_BOUNDARY_ERROR
;
1210 return NVME_SUCCESS
;
1213 static uint16_t nvme_check_zone_state_for_read(NvmeZone
*zone
)
1217 switch (nvme_get_zone_state(zone
)) {
1218 case NVME_ZONE_STATE_EMPTY
:
1219 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1220 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1221 case NVME_ZONE_STATE_FULL
:
1222 case NVME_ZONE_STATE_CLOSED
:
1223 case NVME_ZONE_STATE_READ_ONLY
:
1224 status
= NVME_SUCCESS
;
1226 case NVME_ZONE_STATE_OFFLINE
:
1227 status
= NVME_ZONE_OFFLINE
;
1236 static uint16_t nvme_check_zone_read(NvmeNamespace
*ns
, uint64_t slba
,
1239 NvmeZone
*zone
= nvme_get_zone_by_slba(ns
, slba
);
1240 uint64_t bndry
= nvme_zone_rd_boundary(ns
, zone
);
1241 uint64_t end
= slba
+ nlb
;
1244 status
= nvme_check_zone_state_for_read(zone
);
1247 } else if (unlikely(end
> bndry
)) {
1248 if (!ns
->params
.cross_zone_read
) {
1249 status
= NVME_ZONE_BOUNDARY_ERROR
;
1252 * Read across zone boundary - check that all subsequent
1253 * zones that are being read have an appropriate state.
1257 status
= nvme_check_zone_state_for_read(zone
);
1261 } while (end
> nvme_zone_rd_boundary(ns
, zone
));
1268 static void nvme_auto_transition_zone(NvmeNamespace
*ns
)
1272 if (ns
->params
.max_open_zones
&&
1273 ns
->nr_open_zones
== ns
->params
.max_open_zones
) {
1274 zone
= QTAILQ_FIRST(&ns
->imp_open_zones
);
1277 * Automatically close this implicitly open zone.
1279 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
1280 nvme_aor_dec_open(ns
);
1281 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1286 static uint16_t nvme_auto_open_zone(NvmeNamespace
*ns
, NvmeZone
*zone
)
1288 uint16_t status
= NVME_SUCCESS
;
1289 uint8_t zs
= nvme_get_zone_state(zone
);
1291 if (zs
== NVME_ZONE_STATE_EMPTY
) {
1292 nvme_auto_transition_zone(ns
);
1293 status
= nvme_aor_check(ns
, 1, 1);
1294 } else if (zs
== NVME_ZONE_STATE_CLOSED
) {
1295 nvme_auto_transition_zone(ns
);
1296 status
= nvme_aor_check(ns
, 0, 1);
1302 static void nvme_finalize_zoned_write(NvmeNamespace
*ns
, NvmeRequest
*req
,
1305 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1307 NvmeZonedResult
*res
= (NvmeZonedResult
*)&req
->cqe
;
1311 slba
= le64_to_cpu(rw
->slba
);
1312 nlb
= le16_to_cpu(rw
->nlb
) + 1;
1313 zone
= nvme_get_zone_by_slba(ns
, slba
);
1321 if (zone
->d
.wp
== nvme_zone_wr_boundary(zone
)) {
1322 switch (nvme_get_zone_state(zone
)) {
1323 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1324 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1325 nvme_aor_dec_open(ns
);
1327 case NVME_ZONE_STATE_CLOSED
:
1328 nvme_aor_dec_active(ns
);
1330 case NVME_ZONE_STATE_EMPTY
:
1331 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1333 case NVME_ZONE_STATE_FULL
:
1341 static void nvme_advance_zone_wp(NvmeNamespace
*ns
, NvmeZone
*zone
,
1348 if (zone
->w_ptr
< nvme_zone_wr_boundary(zone
)) {
1349 zs
= nvme_get_zone_state(zone
);
1351 case NVME_ZONE_STATE_EMPTY
:
1352 nvme_aor_inc_active(ns
);
1354 case NVME_ZONE_STATE_CLOSED
:
1355 nvme_aor_inc_open(ns
);
1356 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_IMPLICITLY_OPEN
);
1361 static inline bool nvme_is_write(NvmeRequest
*req
)
1363 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1365 return rw
->opcode
== NVME_CMD_WRITE
||
1366 rw
->opcode
== NVME_CMD_ZONE_APPEND
||
1367 rw
->opcode
== NVME_CMD_WRITE_ZEROES
;
1370 static void nvme_rw_cb(void *opaque
, int ret
)
1372 NvmeRequest
*req
= opaque
;
1373 NvmeNamespace
*ns
= req
->ns
;
1375 BlockBackend
*blk
= ns
->blkconf
.blk
;
1376 BlockAcctCookie
*acct
= &req
->acct
;
1377 BlockAcctStats
*stats
= blk_get_stats(blk
);
1379 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
1381 if (ns
->params
.zoned
&& nvme_is_write(req
)) {
1382 nvme_finalize_zoned_write(ns
, req
, ret
!= 0);
1386 block_acct_done(stats
, acct
);
1388 block_acct_failed(stats
, acct
);
1389 nvme_aio_err(req
, ret
);
1392 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1395 static void nvme_aio_discard_cb(void *opaque
, int ret
)
1397 NvmeRequest
*req
= opaque
;
1398 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1400 trace_pci_nvme_aio_discard_cb(nvme_cid(req
));
1403 nvme_aio_err(req
, ret
);
1412 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1415 struct nvme_zone_reset_ctx
{
1420 static void nvme_aio_zone_reset_cb(void *opaque
, int ret
)
1422 struct nvme_zone_reset_ctx
*ctx
= opaque
;
1423 NvmeRequest
*req
= ctx
->req
;
1424 NvmeNamespace
*ns
= req
->ns
;
1425 NvmeZone
*zone
= ctx
->zone
;
1426 uintptr_t *resets
= (uintptr_t *)&req
->opaque
;
1430 trace_pci_nvme_aio_zone_reset_cb(nvme_cid(req
), zone
->d
.zslba
);
1433 switch (nvme_get_zone_state(zone
)) {
1434 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1435 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1436 nvme_aor_dec_open(ns
);
1438 case NVME_ZONE_STATE_CLOSED
:
1439 nvme_aor_dec_active(ns
);
1441 case NVME_ZONE_STATE_FULL
:
1442 zone
->w_ptr
= zone
->d
.zslba
;
1443 zone
->d
.wp
= zone
->w_ptr
;
1444 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EMPTY
);
1450 nvme_aio_err(req
, ret
);
1459 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1462 struct nvme_compare_ctx
{
1468 static void nvme_compare_cb(void *opaque
, int ret
)
1470 NvmeRequest
*req
= opaque
;
1471 NvmeNamespace
*ns
= req
->ns
;
1472 struct nvme_compare_ctx
*ctx
= req
->opaque
;
1473 g_autofree
uint8_t *buf
= NULL
;
1476 trace_pci_nvme_compare_cb(nvme_cid(req
));
1479 block_acct_done(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1481 block_acct_failed(blk_get_stats(ns
->blkconf
.blk
), &req
->acct
);
1482 nvme_aio_err(req
, ret
);
1486 buf
= g_malloc(ctx
->len
);
1488 status
= nvme_dma(nvme_ctrl(req
), buf
, ctx
->len
, DMA_DIRECTION_TO_DEVICE
,
1491 req
->status
= status
;
1495 if (memcmp(buf
, ctx
->bounce
, ctx
->len
)) {
1496 req
->status
= NVME_CMP_FAILURE
;
1500 qemu_iovec_destroy(&ctx
->iov
);
1501 g_free(ctx
->bounce
);
1504 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1507 static uint16_t nvme_dsm(NvmeCtrl
*n
, NvmeRequest
*req
)
1509 NvmeNamespace
*ns
= req
->ns
;
1510 NvmeDsmCmd
*dsm
= (NvmeDsmCmd
*) &req
->cmd
;
1512 uint32_t attr
= le32_to_cpu(dsm
->attributes
);
1513 uint32_t nr
= (le32_to_cpu(dsm
->nr
) & 0xff) + 1;
1515 uint16_t status
= NVME_SUCCESS
;
1517 trace_pci_nvme_dsm(nvme_cid(req
), nvme_nsid(ns
), nr
, attr
);
1519 if (attr
& NVME_DSMGMT_AD
) {
1522 NvmeDsmRange range
[nr
];
1523 uintptr_t *discards
= (uintptr_t *)&req
->opaque
;
1525 status
= nvme_dma(n
, (uint8_t *)range
, sizeof(range
),
1526 DMA_DIRECTION_TO_DEVICE
, req
);
         * AIO callbacks may be called immediately, so initialize discards to 1
         * to make sure the callback does not complete the request before
         * all discards have been issued.
1538 for (int i
= 0; i
< nr
; i
++) {
1539 uint64_t slba
= le64_to_cpu(range
[i
].slba
);
1540 uint32_t nlb
= le32_to_cpu(range
[i
].nlb
);
1542 if (nvme_check_bounds(ns
, slba
, nlb
)) {
1543 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
1548 trace_pci_nvme_dsm_deallocate(nvme_cid(req
), nvme_nsid(ns
), slba
,
1551 offset
= nvme_l2b(ns
, slba
);
1552 len
= nvme_l2b(ns
, nlb
);
1555 size_t bytes
= MIN(BDRV_REQUEST_MAX_BYTES
, len
);
1559 blk_aio_pdiscard(ns
->blkconf
.blk
, offset
, bytes
,
1560 nvme_aio_discard_cb
, req
);
1567 /* account for the 1-initialization */
1571 status
= NVME_NO_COMPLETE
;
1573 status
= req
->status
;
1580 static uint16_t nvme_compare(NvmeCtrl
*n
, NvmeRequest
*req
)
1582 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1583 NvmeNamespace
*ns
= req
->ns
;
1584 BlockBackend
*blk
= ns
->blkconf
.blk
;
1585 uint64_t slba
= le64_to_cpu(rw
->slba
);
1586 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
1587 size_t len
= nvme_l2b(ns
, nlb
);
1588 int64_t offset
= nvme_l2b(ns
, slba
);
1589 uint8_t *bounce
= NULL
;
1590 struct nvme_compare_ctx
*ctx
= NULL
;
1593 trace_pci_nvme_compare(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
1595 status
= nvme_check_mdts(n
, len
);
1597 trace_pci_nvme_err_mdts(nvme_cid(req
), len
);
1601 status
= nvme_check_bounds(ns
, slba
, nlb
);
1603 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1607 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1608 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1614 bounce
= g_malloc(len
);
1616 ctx
= g_new(struct nvme_compare_ctx
, 1);
1617 ctx
->bounce
= bounce
;
1622 qemu_iovec_init(&ctx
->iov
, 1);
1623 qemu_iovec_add(&ctx
->iov
, bounce
, len
);
1625 block_acct_start(blk_get_stats(blk
), &req
->acct
, len
, BLOCK_ACCT_READ
);
1626 blk_aio_preadv(blk
, offset
, &ctx
->iov
, 0, nvme_compare_cb
, req
);
1628 return NVME_NO_COMPLETE
;
1631 static uint16_t nvme_flush(NvmeCtrl
*n
, NvmeRequest
*req
)
1633 block_acct_start(blk_get_stats(req
->ns
->blkconf
.blk
), &req
->acct
, 0,
1635 req
->aiocb
= blk_aio_flush(req
->ns
->blkconf
.blk
, nvme_rw_cb
, req
);
1636 return NVME_NO_COMPLETE
;
1639 static uint16_t nvme_read(NvmeCtrl
*n
, NvmeRequest
*req
)
1641 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1642 NvmeNamespace
*ns
= req
->ns
;
1643 uint64_t slba
= le64_to_cpu(rw
->slba
);
1644 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1645 uint64_t data_size
= nvme_l2b(ns
, nlb
);
1646 uint64_t data_offset
;
1647 BlockBackend
*blk
= ns
->blkconf
.blk
;
1650 trace_pci_nvme_read(nvme_cid(req
), nvme_nsid(ns
), nlb
, data_size
, slba
);
1652 status
= nvme_check_mdts(n
, data_size
);
1654 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
1658 status
= nvme_check_bounds(ns
, slba
, nlb
);
1660 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1664 if (ns
->params
.zoned
) {
1665 status
= nvme_check_zone_read(ns
, slba
, nlb
);
1667 trace_pci_nvme_err_zone_read_not_ok(slba
, nlb
, status
);
1672 status
= nvme_map_dptr(n
, data_size
, req
);
1677 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
1678 status
= nvme_check_dulbe(ns
, slba
, nlb
);
1684 data_offset
= nvme_l2b(ns
, slba
);
1686 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
1689 req
->aiocb
= dma_blk_read(blk
, &req
->qsg
, data_offset
,
1690 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1692 req
->aiocb
= blk_aio_preadv(blk
, data_offset
, &req
->iov
, 0,
1695 return NVME_NO_COMPLETE
;
1698 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_READ
);
1699 return status
| NVME_DNR
;
1702 static uint16_t nvme_do_write(NvmeCtrl
*n
, NvmeRequest
*req
, bool append
,
1705 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1706 NvmeNamespace
*ns
= req
->ns
;
1707 uint64_t slba
= le64_to_cpu(rw
->slba
);
1708 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1709 uint64_t data_size
= nvme_l2b(ns
, nlb
);
1710 uint64_t data_offset
;
1712 NvmeZonedResult
*res
= (NvmeZonedResult
*)&req
->cqe
;
1713 BlockBackend
*blk
= ns
->blkconf
.blk
;
1716 trace_pci_nvme_write(nvme_cid(req
), nvme_io_opc_str(rw
->opcode
),
1717 nvme_nsid(ns
), nlb
, data_size
, slba
);
1720 status
= nvme_check_mdts(n
, data_size
);
1722 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
1727 status
= nvme_check_bounds(ns
, slba
, nlb
);
1729 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, ns
->id_ns
.nsze
);
1733 if (ns
->params
.zoned
) {
1734 zone
= nvme_get_zone_by_slba(ns
, slba
);
1737 if (unlikely(slba
!= zone
->d
.zslba
)) {
1738 trace_pci_nvme_err_append_not_at_start(slba
, zone
->d
.zslba
);
1739 status
= NVME_INVALID_FIELD
;
1743 if (nvme_l2b(ns
, nlb
) > (n
->page_size
<< n
->zasl
)) {
1744 trace_pci_nvme_err_append_too_large(slba
, nlb
, n
->zasl
);
1745 status
= NVME_INVALID_FIELD
;
1750 res
->slba
= cpu_to_le64(slba
);
1753 status
= nvme_check_zone_write(n
, ns
, zone
, slba
, nlb
);
1758 status
= nvme_auto_open_zone(ns
, zone
);
1763 nvme_advance_zone_wp(ns
, zone
, nlb
);
1766 data_offset
= nvme_l2b(ns
, slba
);
1769 status
= nvme_map_dptr(n
, data_size
, req
);
1774 block_acct_start(blk_get_stats(blk
), &req
->acct
, data_size
,
1777 req
->aiocb
= dma_blk_write(blk
, &req
->qsg
, data_offset
,
1778 BDRV_SECTOR_SIZE
, nvme_rw_cb
, req
);
1780 req
->aiocb
= blk_aio_pwritev(blk
, data_offset
, &req
->iov
, 0,
1784 block_acct_start(blk_get_stats(blk
), &req
->acct
, 0, BLOCK_ACCT_WRITE
);
1785 req
->aiocb
= blk_aio_pwrite_zeroes(blk
, data_offset
, data_size
,
1786 BDRV_REQ_MAY_UNMAP
, nvme_rw_cb
,
1789 return NVME_NO_COMPLETE
;
1792 block_acct_invalid(blk_get_stats(blk
), BLOCK_ACCT_WRITE
);
1793 return status
| NVME_DNR
;
1796 static inline uint16_t nvme_write(NvmeCtrl
*n
, NvmeRequest
*req
)
1798 return nvme_do_write(n
, req
, false, false);
1801 static inline uint16_t nvme_write_zeroes(NvmeCtrl
*n
, NvmeRequest
*req
)
1803 return nvme_do_write(n
, req
, false, true);
1806 static inline uint16_t nvme_zone_append(NvmeCtrl
*n
, NvmeRequest
*req
)
1808 return nvme_do_write(n
, req
, true, false);
1811 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace
*ns
, NvmeCmd
*c
,
1812 uint64_t *slba
, uint32_t *zone_idx
)
1814 uint32_t dw10
= le32_to_cpu(c
->cdw10
);
1815 uint32_t dw11
= le32_to_cpu(c
->cdw11
);
1817 if (!ns
->params
.zoned
) {
1818 trace_pci_nvme_err_invalid_opc(c
->opcode
);
1819 return NVME_INVALID_OPCODE
| NVME_DNR
;
1822 *slba
= ((uint64_t)dw11
) << 32 | dw10
;
1823 if (unlikely(*slba
>= ns
->id_ns
.nsze
)) {
1824 trace_pci_nvme_err_invalid_lba_range(*slba
, 0, ns
->id_ns
.nsze
);
1826 return NVME_LBA_RANGE
| NVME_DNR
;
1829 *zone_idx
= nvme_zone_idx(ns
, *slba
);
1830 assert(*zone_idx
< ns
->num_zones
);
1832 return NVME_SUCCESS
;
1835 typedef uint16_t (*op_handler_t
)(NvmeNamespace
*, NvmeZone
*, NvmeZoneState
,
1838 enum NvmeZoneProcessingMask
{
1839 NVME_PROC_CURRENT_ZONE
= 0,
1840 NVME_PROC_OPENED_ZONES
= 1 << 0,
1841 NVME_PROC_CLOSED_ZONES
= 1 << 1,
1842 NVME_PROC_READ_ONLY_ZONES
= 1 << 2,
1843 NVME_PROC_FULL_ZONES
= 1 << 3,
1846 static uint16_t nvme_open_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1847 NvmeZoneState state
, NvmeRequest
*req
)
1852 case NVME_ZONE_STATE_EMPTY
:
1853 status
= nvme_aor_check(ns
, 1, 0);
1857 nvme_aor_inc_active(ns
);
1859 case NVME_ZONE_STATE_CLOSED
:
1860 status
= nvme_aor_check(ns
, 0, 1);
1862 if (state
== NVME_ZONE_STATE_EMPTY
) {
1863 nvme_aor_dec_active(ns
);
1867 nvme_aor_inc_open(ns
);
1869 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1870 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EXPLICITLY_OPEN
);
1872 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1873 return NVME_SUCCESS
;
1875 return NVME_ZONE_INVAL_TRANSITION
;
1879 static uint16_t nvme_close_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1880 NvmeZoneState state
, NvmeRequest
*req
)
1883 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1884 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1885 nvme_aor_dec_open(ns
);
1886 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1888 case NVME_ZONE_STATE_CLOSED
:
1889 return NVME_SUCCESS
;
1891 return NVME_ZONE_INVAL_TRANSITION
;
1895 static uint16_t nvme_finish_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1896 NvmeZoneState state
, NvmeRequest
*req
)
1899 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1900 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1901 nvme_aor_dec_open(ns
);
1903 case NVME_ZONE_STATE_CLOSED
:
1904 nvme_aor_dec_active(ns
);
1906 case NVME_ZONE_STATE_EMPTY
:
1907 zone
->w_ptr
= nvme_zone_wr_boundary(zone
);
1908 zone
->d
.wp
= zone
->w_ptr
;
1909 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1911 case NVME_ZONE_STATE_FULL
:
1912 return NVME_SUCCESS
;
1914 return NVME_ZONE_INVAL_TRANSITION
;
1918 static uint16_t nvme_reset_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1919 NvmeZoneState state
, NvmeRequest
*req
)
1921 uintptr_t *resets
= (uintptr_t *)&req
->opaque
;
1922 struct nvme_zone_reset_ctx
*ctx
;
1925 case NVME_ZONE_STATE_EMPTY
:
1926 return NVME_SUCCESS
;
1927 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1928 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1929 case NVME_ZONE_STATE_CLOSED
:
1930 case NVME_ZONE_STATE_FULL
:
1933 return NVME_ZONE_INVAL_TRANSITION
;
1937 * The zone reset aio callback needs to know the zone that is being reset
1938 * in order to transition the zone on completion.
1940 ctx
= g_new(struct nvme_zone_reset_ctx
, 1);
1946 blk_aio_pwrite_zeroes(ns
->blkconf
.blk
, nvme_l2b(ns
, zone
->d
.zslba
),
1947 nvme_l2b(ns
, ns
->zone_size
), BDRV_REQ_MAY_UNMAP
,
1948 nvme_aio_zone_reset_cb
, ctx
);
1950 return NVME_NO_COMPLETE
;
1953 static uint16_t nvme_offline_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1954 NvmeZoneState state
, NvmeRequest
*req
)
1957 case NVME_ZONE_STATE_READ_ONLY
:
1958 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_OFFLINE
);
1960 case NVME_ZONE_STATE_OFFLINE
:
1961 return NVME_SUCCESS
;
1963 return NVME_ZONE_INVAL_TRANSITION
;
1967 static uint16_t nvme_set_zd_ext(NvmeNamespace
*ns
, NvmeZone
*zone
)
1970 uint8_t state
= nvme_get_zone_state(zone
);
1972 if (state
== NVME_ZONE_STATE_EMPTY
) {
1973 status
= nvme_aor_check(ns
, 1, 0);
1977 nvme_aor_inc_active(ns
);
1978 zone
->d
.za
|= NVME_ZA_ZD_EXT_VALID
;
1979 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1980 return NVME_SUCCESS
;
1983 return NVME_ZONE_INVAL_TRANSITION
;
1986 static uint16_t nvme_bulk_proc_zone(NvmeNamespace
*ns
, NvmeZone
*zone
,
1987 enum NvmeZoneProcessingMask proc_mask
,
1988 op_handler_t op_hndlr
, NvmeRequest
*req
)
1990 uint16_t status
= NVME_SUCCESS
;
1991 NvmeZoneState zs
= nvme_get_zone_state(zone
);
1995 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1996 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1997 proc_zone
= proc_mask
& NVME_PROC_OPENED_ZONES
;
1999 case NVME_ZONE_STATE_CLOSED
:
2000 proc_zone
= proc_mask
& NVME_PROC_CLOSED_ZONES
;
2002 case NVME_ZONE_STATE_READ_ONLY
:
2003 proc_zone
= proc_mask
& NVME_PROC_READ_ONLY_ZONES
;
2005 case NVME_ZONE_STATE_FULL
:
2006 proc_zone
= proc_mask
& NVME_PROC_FULL_ZONES
;
2013 status
= op_hndlr(ns
, zone
, zs
, req
);
2019 static uint16_t nvme_do_zone_op(NvmeNamespace
*ns
, NvmeZone
*zone
,
2020 enum NvmeZoneProcessingMask proc_mask
,
2021 op_handler_t op_hndlr
, NvmeRequest
*req
)
2024 uint16_t status
= NVME_SUCCESS
;
2028 status
= op_hndlr(ns
, zone
, nvme_get_zone_state(zone
), req
);
2030 if (proc_mask
& NVME_PROC_CLOSED_ZONES
) {
2031 QTAILQ_FOREACH_SAFE(zone
, &ns
->closed_zones
, entry
, next
) {
2032 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2034 if (status
&& status
!= NVME_NO_COMPLETE
) {
2039 if (proc_mask
& NVME_PROC_OPENED_ZONES
) {
2040 QTAILQ_FOREACH_SAFE(zone
, &ns
->imp_open_zones
, entry
, next
) {
2041 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2043 if (status
&& status
!= NVME_NO_COMPLETE
) {
2048 QTAILQ_FOREACH_SAFE(zone
, &ns
->exp_open_zones
, entry
, next
) {
2049 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2051 if (status
&& status
!= NVME_NO_COMPLETE
) {
2056 if (proc_mask
& NVME_PROC_FULL_ZONES
) {
2057 QTAILQ_FOREACH_SAFE(zone
, &ns
->full_zones
, entry
, next
) {
2058 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2060 if (status
&& status
!= NVME_NO_COMPLETE
) {
2066 if (proc_mask
& NVME_PROC_READ_ONLY_ZONES
) {
2067 for (i
= 0; i
< ns
->num_zones
; i
++, zone
++) {
2068 status
= nvme_bulk_proc_zone(ns
, zone
, proc_mask
, op_hndlr
,
2070 if (status
&& status
!= NVME_NO_COMPLETE
) {
2081 static uint16_t nvme_zone_mgmt_send(NvmeCtrl
*n
, NvmeRequest
*req
)
2083 NvmeCmd
*cmd
= (NvmeCmd
*)&req
->cmd
;
2084 NvmeNamespace
*ns
= req
->ns
;
2088 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
2090 uint32_t zone_idx
= 0;
2094 enum NvmeZoneProcessingMask proc_mask
= NVME_PROC_CURRENT_ZONE
;
2096 action
= dw13
& 0xff;
2099 req
->status
= NVME_SUCCESS
;
2102 status
= nvme_get_mgmt_zone_slba_idx(ns
, cmd
, &slba
, &zone_idx
);
2108 zone
= &ns
->zone_array
[zone_idx
];
2109 if (slba
!= zone
->d
.zslba
) {
2110 trace_pci_nvme_err_unaligned_zone_cmd(action
, slba
, zone
->d
.zslba
);
2111 return NVME_INVALID_FIELD
| NVME_DNR
;
2116 case NVME_ZONE_ACTION_OPEN
:
2118 proc_mask
= NVME_PROC_CLOSED_ZONES
;
2120 trace_pci_nvme_open_zone(slba
, zone_idx
, all
);
2121 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_open_zone
, req
);
2124 case NVME_ZONE_ACTION_CLOSE
:
2126 proc_mask
= NVME_PROC_OPENED_ZONES
;
2128 trace_pci_nvme_close_zone(slba
, zone_idx
, all
);
2129 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_close_zone
, req
);
2132 case NVME_ZONE_ACTION_FINISH
:
2134 proc_mask
= NVME_PROC_OPENED_ZONES
| NVME_PROC_CLOSED_ZONES
;
2136 trace_pci_nvme_finish_zone(slba
, zone_idx
, all
);
2137 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_finish_zone
, req
);
2140 case NVME_ZONE_ACTION_RESET
:
2141 resets
= (uintptr_t *)&req
->opaque
;
2144 proc_mask
= NVME_PROC_OPENED_ZONES
| NVME_PROC_CLOSED_ZONES
|
2145 NVME_PROC_FULL_ZONES
;
2147 trace_pci_nvme_reset_zone(slba
, zone_idx
, all
);
2151 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_reset_zone
, req
);
2155 return *resets
? NVME_NO_COMPLETE
: req
->status
;
2157 case NVME_ZONE_ACTION_OFFLINE
:
2159 proc_mask
= NVME_PROC_READ_ONLY_ZONES
;
2161 trace_pci_nvme_offline_zone(slba
, zone_idx
, all
);
2162 status
= nvme_do_zone_op(ns
, zone
, proc_mask
, nvme_offline_zone
, req
);
2165 case NVME_ZONE_ACTION_SET_ZD_EXT
:
2166 trace_pci_nvme_set_descriptor_extension(slba
, zone_idx
);
2167 if (all
|| !ns
->params
.zd_extension_size
) {
2168 return NVME_INVALID_FIELD
| NVME_DNR
;
2170 zd_ext
= nvme_get_zd_extension(ns
, zone_idx
);
2171 status
= nvme_dma(n
, zd_ext
, ns
->params
.zd_extension_size
,
2172 DMA_DIRECTION_TO_DEVICE
, req
);
2174 trace_pci_nvme_err_zd_extension_map_error(zone_idx
);
2178 status
= nvme_set_zd_ext(ns
, zone
);
2179 if (status
== NVME_SUCCESS
) {
2180 trace_pci_nvme_zd_extension_set(zone_idx
);
2186 trace_pci_nvme_err_invalid_mgmt_action(action
);
2187 status
= NVME_INVALID_FIELD
;
2190 if (status
== NVME_ZONE_INVAL_TRANSITION
) {
2191 trace_pci_nvme_err_invalid_zone_state_transition(action
, slba
,
2201 static bool nvme_zone_matches_filter(uint32_t zafs
, NvmeZone
*zl
)
2203 NvmeZoneState zs
= nvme_get_zone_state(zl
);
2206 case NVME_ZONE_REPORT_ALL
:
2208 case NVME_ZONE_REPORT_EMPTY
:
2209 return zs
== NVME_ZONE_STATE_EMPTY
;
2210 case NVME_ZONE_REPORT_IMPLICITLY_OPEN
:
2211 return zs
== NVME_ZONE_STATE_IMPLICITLY_OPEN
;
2212 case NVME_ZONE_REPORT_EXPLICITLY_OPEN
:
2213 return zs
== NVME_ZONE_STATE_EXPLICITLY_OPEN
;
2214 case NVME_ZONE_REPORT_CLOSED
:
2215 return zs
== NVME_ZONE_STATE_CLOSED
;
2216 case NVME_ZONE_REPORT_FULL
:
2217 return zs
== NVME_ZONE_STATE_FULL
;
2218 case NVME_ZONE_REPORT_READ_ONLY
:
2219 return zs
== NVME_ZONE_STATE_READ_ONLY
;
2220 case NVME_ZONE_REPORT_OFFLINE
:
2221 return zs
== NVME_ZONE_STATE_OFFLINE
;
2227 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl
*n
, NvmeRequest
*req
)
2229 NvmeCmd
*cmd
= (NvmeCmd
*)&req
->cmd
;
2230 NvmeNamespace
*ns
= req
->ns
;
    /* cdw12 is the zero-based number of dwords to return; convert to bytes */
2232 uint32_t data_size
= (le32_to_cpu(cmd
->cdw12
) + 1) << 2;
2233 uint32_t dw13
= le32_to_cpu(cmd
->cdw13
);
2234 uint32_t zone_idx
, zra
, zrasf
, partial
;
2235 uint64_t max_zones
, nr_zones
= 0;
2237 uint64_t slba
, capacity
= nvme_ns_nlbas(ns
);
2240 NvmeZoneReportHeader
*header
;
2242 size_t zone_entry_sz
;
2244 req
->status
= NVME_SUCCESS
;
2246 status
= nvme_get_mgmt_zone_slba_idx(ns
, cmd
, &slba
, &zone_idx
);
2252 if (zra
!= NVME_ZONE_REPORT
&& zra
!= NVME_ZONE_REPORT_EXTENDED
) {
2253 return NVME_INVALID_FIELD
| NVME_DNR
;
2255 if (zra
== NVME_ZONE_REPORT_EXTENDED
&& !ns
->params
.zd_extension_size
) {
2256 return NVME_INVALID_FIELD
| NVME_DNR
;
2259 zrasf
= (dw13
>> 8) & 0xff;
2260 if (zrasf
> NVME_ZONE_REPORT_OFFLINE
) {
2261 return NVME_INVALID_FIELD
| NVME_DNR
;
2264 if (data_size
< sizeof(NvmeZoneReportHeader
)) {
2265 return NVME_INVALID_FIELD
| NVME_DNR
;
2268 status
= nvme_check_mdts(n
, data_size
);
2270 trace_pci_nvme_err_mdts(nvme_cid(req
), data_size
);
2274 partial
= (dw13
>> 16) & 0x01;
2276 zone_entry_sz
= sizeof(NvmeZoneDescr
);
2277 if (zra
== NVME_ZONE_REPORT_EXTENDED
) {
2278 zone_entry_sz
+= ns
->params
.zd_extension_size
;
2281 max_zones
= (data_size
- sizeof(NvmeZoneReportHeader
)) / zone_entry_sz
;
2282 buf
= g_malloc0(data_size
);
2284 zone
= &ns
->zone_array
[zone_idx
];
2285 for (; slba
< capacity
; slba
+= ns
->zone_size
) {
2286 if (partial
&& nr_zones
>= max_zones
) {
2289 if (nvme_zone_matches_filter(zrasf
, zone
++)) {
2293 header
= (NvmeZoneReportHeader
*)buf
;
2294 header
->nr_zones
= cpu_to_le64(nr_zones
);
2296 buf_p
= buf
+ sizeof(NvmeZoneReportHeader
);
2297 for (; zone_idx
< ns
->num_zones
&& max_zones
> 0; zone_idx
++) {
2298 zone
= &ns
->zone_array
[zone_idx
];
2299 if (nvme_zone_matches_filter(zrasf
, zone
)) {
2300 z
= (NvmeZoneDescr
*)buf_p
;
2301 buf_p
+= sizeof(NvmeZoneDescr
);
2305 z
->zcap
= cpu_to_le64(zone
->d
.zcap
);
2306 z
->zslba
= cpu_to_le64(zone
->d
.zslba
);
2309 if (nvme_wp_is_valid(zone
)) {
2310 z
->wp
= cpu_to_le64(zone
->d
.wp
);
2312 z
->wp
= cpu_to_le64(~0ULL);
2315 if (zra
== NVME_ZONE_REPORT_EXTENDED
) {
2316 if (zone
->d
.za
& NVME_ZA_ZD_EXT_VALID
) {
2317 memcpy(buf_p
, nvme_get_zd_extension(ns
, zone_idx
),
2318 ns
->params
.zd_extension_size
);
2320 buf_p
+= ns
->params
.zd_extension_size
;
2327 status
= nvme_dma(n
, (uint8_t *)buf
, data_size
,
2328 DMA_DIRECTION_FROM_DEVICE
, req
);
2335 static uint16_t nvme_io_cmd(NvmeCtrl
*n
, NvmeRequest
*req
)
2337 uint32_t nsid
= le32_to_cpu(req
->cmd
.nsid
);
2339 trace_pci_nvme_io_cmd(nvme_cid(req
), nsid
, nvme_sqid(req
),
2340 req
->cmd
.opcode
, nvme_io_opc_str(req
->cmd
.opcode
));
2342 if (!nvme_nsid_valid(n
, nsid
)) {
2343 return NVME_INVALID_NSID
| NVME_DNR
;
2346 req
->ns
= nvme_ns(n
, nsid
);
2347 if (unlikely(!req
->ns
)) {
2348 return NVME_INVALID_FIELD
| NVME_DNR
;
2351 if (!(req
->ns
->iocs
[req
->cmd
.opcode
] & NVME_CMD_EFF_CSUPP
)) {
2352 trace_pci_nvme_err_invalid_opc(req
->cmd
.opcode
);
2353 return NVME_INVALID_OPCODE
| NVME_DNR
;
2356 switch (req
->cmd
.opcode
) {
2357 case NVME_CMD_FLUSH
:
2358 return nvme_flush(n
, req
);
2359 case NVME_CMD_WRITE_ZEROES
:
2360 return nvme_write_zeroes(n
, req
);
2361 case NVME_CMD_ZONE_APPEND
:
2362 return nvme_zone_append(n
, req
);
2363 case NVME_CMD_WRITE
:
2364 return nvme_write(n
, req
);
2366 return nvme_read(n
, req
);
2367 case NVME_CMD_COMPARE
:
2368 return nvme_compare(n
, req
);
2370 return nvme_dsm(n
, req
);
2371 case NVME_CMD_ZONE_MGMT_SEND
:
2372 return nvme_zone_mgmt_send(n
, req
);
2373 case NVME_CMD_ZONE_MGMT_RECV
:
2374 return nvme_zone_mgmt_recv(n
, req
);
2379 return NVME_INVALID_OPCODE
| NVME_DNR
;
2382 static void nvme_free_sq(NvmeSQueue
*sq
, NvmeCtrl
*n
)
2384 n
->sq
[sq
->sqid
] = NULL
;
2385 timer_free(sq
->timer
);
2392 static uint16_t nvme_del_sq(NvmeCtrl
*n
, NvmeRequest
*req
)
2394 NvmeDeleteQ
*c
= (NvmeDeleteQ
*)&req
->cmd
;
2395 NvmeRequest
*r
, *next
;
2398 uint16_t qid
= le16_to_cpu(c
->qid
);
2400 if (unlikely(!qid
|| nvme_check_sqid(n
, qid
))) {
2401 trace_pci_nvme_err_invalid_del_sq(qid
);
2402 return NVME_INVALID_QID
| NVME_DNR
;
2405 trace_pci_nvme_del_sq(qid
);
2408 while (!QTAILQ_EMPTY(&sq
->out_req_list
)) {
2409 r
= QTAILQ_FIRST(&sq
->out_req_list
);
2411 blk_aio_cancel(r
->aiocb
);
2413 if (!nvme_check_cqid(n
, sq
->cqid
)) {
2414 cq
= n
->cq
[sq
->cqid
];
2415 QTAILQ_REMOVE(&cq
->sq_list
, sq
, entry
);
2418 QTAILQ_FOREACH_SAFE(r
, &cq
->req_list
, entry
, next
) {
2420 QTAILQ_REMOVE(&cq
->req_list
, r
, entry
);
2421 QTAILQ_INSERT_TAIL(&sq
->req_list
, r
, entry
);
2426 nvme_free_sq(sq
, n
);
2427 return NVME_SUCCESS
;
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
        n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };
    uint32_t trans_len;
    NvmeNamespace *ns;
    time_t current_ms;

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
        nvme_set_blk_stats(ns, &stats);
    } else {
        int i;

        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }
            nvme_set_blk_stats(ns, &stats);
        }
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);
    smart.critical_warning = n->smart_critical_warning;

    /* data units are reported in thousands of 512-byte units */
    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
                                                        1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);
    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
                                 uint64_t off, NvmeRequest *req)
{
    NvmeEffectsLog log = {};
    const uint32_t *src_iocs = NULL;
    uint32_t trans_len;

    if (off >= sizeof(log)) {
        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (NVME_CC_CSS(n->bar.cc)) {
    case NVME_CC_CSS_NVM:
        src_iocs = nvme_cse_iocs_nvm;
        /* fall through */
    case NVME_CC_CSS_ADMIN_ONLY:
        break;
    case NVME_CC_CSS_CSI:
        switch (csi) {
        case NVME_CSI_NVM:
            src_iocs = nvme_cse_iocs_nvm;
            break;
        case NVME_CSI_ZONED:
            src_iocs = nvme_cse_iocs_zoned;
            break;
        }
    }

    memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));

    if (src_iocs) {
        memcpy(log.iocs, src_iocs, sizeof(log.iocs));
    }

    trans_len = MIN(sizeof(log) - off, buf_len);

    return nvme_dma(n, ((uint8_t *)&log) + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t lid = dw10 & 0xff;
    uint8_t lsp = (dw10 >> 8) & 0xf;
    uint8_t rae = (dw10 >> 15) & 0x1;
    uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), len);
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    case NVME_LOG_CMD_EFFECTS:
        return nvme_cmd_effects(n, csi, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_free(cq->timer);
    if (msix_enabled(&n->parent_obj)) {
        msix_vector_unuse(&n->parent_obj, cq->vector);
    }
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    if (msix_enabled(&n->parent_obj)) {
        ret = msix_vector_use(&n->parent_obj, vector);
        assert(ret == 0);
    }
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
        n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
}

static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
{
    switch (ns->csi) {
    case NVME_CSI_NVM:
    case NVME_CSI_ZONED:
        return true;
    }
    return false;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    NvmeIdCtrlZoned id = {};

    trace_pci_nvme_identify_ctrl_csi(c->csi);

    if (c->csi == NVME_CSI_NVM) {
        return nvme_rpt_empty_id_struct(n, req);
    } else if (c->csi == NVME_CSI_ZONED) {
        if (n->params.zasl_bs) {
            id.zasl = n->zasl;
        }
        return nvme_dma(n, (uint8_t *)&id, sizeof(id),
                        DMA_DIRECTION_FROM_DEVICE, req);
    }

    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return nvme_rpt_empty_id_struct(n, req);
    }

    if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
        return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
                        DMA_DIRECTION_FROM_DEVICE, req);
    }

    return NVME_INVALID_CMD_SET | NVME_DNR;
}

static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns_csi(nsid, c->csi);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return nvme_rpt_empty_id_struct(n, req);
    }

    if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
        return nvme_rpt_empty_id_struct(n, req);
    } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
        return nvme_dma(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
                        DMA_DIRECTION_FROM_DEVICE, req);
    }

    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }
        if (ns->params.nsid <= min_nsid) {
            continue;
        }
        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);

    /*
     * Same as in nvme_identify_nslist(), 0xffffffff/0xfffffffe are invalid.
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }
        if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
            continue;
        }
        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};

    struct data {
        struct {
            NvmeIdNsDescr hdr;
            uint8_t v[NVME_NIDL_UUID];
        } uuid;
        struct {
            NvmeIdNsDescr hdr;
            uint8_t v;
        } csi;
    };

    struct data *ns_descrs = (struct data *)list;

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    /*
     * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
     * structure, a Namespace UUID (nidt = 0x3) must be reported in the
     * Namespace Identification Descriptor. Add the namespace UUID here.
     */
    ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
    ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID;
    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);

    ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI;
    ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
    ns_descrs->csi.v = ns->csi;

    return nvme_dma(n, list, sizeof(list), DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);

    trace_pci_nvme_identify_cmd_set();

    NVME_SET_CSI(*list, NVME_CSI_NVM);
    NVME_SET_CSI(*list, NVME_CSI_ZONED);

    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        /* fall through */
    case NVME_ID_CNS_NS_PRESENT:
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CS_NS:
        /* fall through */
    case NVME_ID_CNS_CS_NS_PRESENT:
        return nvme_identify_ns_csi(n, req);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_CS_CTRL:
        return nvme_identify_ctrl_csi(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        /* fall through */
    case NVME_ID_CNS_NS_PRESENT_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
        /* fall through */
    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
        return nvme_identify_nslist_csi(n, req);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    case NVME_ID_CNS_IO_COMMAND_SET:
        return nvme_identify_cmd_set(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;
    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
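
/*
 * Illustrative note (not from the original source): the value built above
 * follows the NVMe Timestamp feature layout - a 48-bit count of milliseconds
 * in the low bits with the Timestamp Origin field next to it.  For example,
 * if the host last set the timestamp to a non-zero value T and 5000 ms of
 * QEMU_CLOCK_VIRTUAL time have elapsed since, the returned timestamp field is
 * T + 5000 and the origin field reads 01b; if the host never set a timestamp,
 * the origin field reads 00b.
 */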
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                    DMA_DIRECTION_FROM_DEVICE, req);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;
    NvmeNamespace *ns;
    int i;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_ERROR_RECOVERY:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = ns->features.err_rec;
        goto out;
    case NVME_VOLATILE_WRITE_CACHE:
        result = 0;
        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            result = blk_enable_write_cache(ns->blkconf.blk);
            if (result) {
                break;
            }
        }
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }
        break;
    case NVME_COMMAND_SET_PROFILE:
        result = 0;
        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = NULL;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);
    int i;

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if ((n->temperature >= n->features.temp_thresh_hi) ||
            (n->temperature <= n->features.temp_thresh_low)) {
            nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH);
        }

        break;
    case NVME_ERROR_RECOVERY:
        if (nsid == NVME_NSID_BROADCAST) {
            for (i = 1; i <= n->num_namespaces; i++) {
                ns = nvme_ns(n, i);

                if (!ns) {
                    continue;
                }

                if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
                    ns->features.err_rec = dw11;
                }
            }

            break;
        }

        assert(ns);
        if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
            ns->features.err_rec = dw11;
        }
        break;
    case NVME_VOLATILE_WRITE_CACHE:
        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    case NVME_COMMAND_SET_PROFILE:
        if (dw11 & 0x1ff) {
            trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
            return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
        }
        break;
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
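
/*
 * Worked example (illustrative, not part of the original source): Number of
 * Queues is 0's based in both directions.  With the default max_ioqpairs=64,
 * a Set Features command with CDW11=0x00070007 (requesting 8 I/O CQs and 8
 * I/O SQs) completes with CQE.DW0 = 0x003f003f, i.e. 63 in both halves,
 * telling the host that up to 64 I/O queue pairs may be created regardless of
 * the number it asked for.
 */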
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    }

    return NVME_INVALID_OPCODE | NVME_DNR;
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_ctrl_reset(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_drain(ns);
    }

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;

    n->bar.cc = 0;
}

static void nvme_ctrl_shutdown(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    if (n->pmr.dev) {
        memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_shutdown(ns);
    }
}
static void nvme_select_ns_iocs(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }
        ns->iocs = nvme_cse_iocs_none;
        switch (ns->csi) {
        case NVME_CSI_NVM:
            if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
                ns->iocs = nvme_cse_iocs_nvm;
            }
            break;
        case NVME_CSI_ZONED:
            if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
                ns->iocs = nvme_cse_iocs_zoned;
            } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
                ns->iocs = nvme_cse_iocs_nvm;
            }
            break;
        }
    }
}
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
        trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    if (!n->params.zasl_bs) {
        n->zasl = n->params.mdts;
    } else {
        if (n->params.zasl_bs < n->page_size) {
            trace_pci_nvme_err_startfail_zasl_too_small(n->params.zasl_bs,
                                                        n->page_size);
            return -1;
        }
        n->zasl = 31 - clz32(n->params.zasl_bs / n->page_size);
    }

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    nvme_select_ns_iocs(n);

    return 0;
}
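
/*
 * Illustrative example of the ZASL computation above (not part of the
 * original source): with the default zoned.append_size_limit of 128 KiB and
 * CC.MPS selecting a 4 KiB page size,
 * zasl = 31 - clz32(131072 / 4096) = 31 - clz32(32) = 5, i.e. the Zone Append
 * payload is limited to 2^5 pages (128 KiB).  Setting the property to 0 skips
 * this path and makes ZASL track MDTS instead.
 */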
static void nvme_cmb_enable_regs(NvmeCtrl *n)
{
    NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
}
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_ctrl_reset(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_ctrl_shutdown(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = size == 8 ? data :
            (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff);
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = size == 8 ? data :
            (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff);
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0x50:  /* CMBMSC */
        if (!NVME_CAP_CMBS(n->bar.cap)) {
            return;
        }

        n->bar.cmbmsc = size == 8 ? data :
            (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
        n->cmb.cmse = false;

        if (NVME_CMBMSC_CRE(data)) {
            nvme_cmb_enable_regs(n);

            if (NVME_CMBMSC_CMSE(data)) {
                hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
                if (cba + int128_get64(n->cmb.mem.size) < cba) {
                    NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
                    return;
                }

                n->cmb.cba = cba;
                n->cmb.cmse = true;
            }
        } else {
            n->bar.cmbsz = 0;
            n->bar.cmbloc = 0;
        }

        return;
    case 0x54:  /* CMBMSC hi */
        n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* PMRCTL */
        n->bar.pmrctl = data;

        if (NVME_PMRCTL_EN(data)) {
            memory_region_set_enabled(&n->pmr.dev->mr, true);
            n->bar.pmrsts = 0;
        } else {
            memory_region_set_enabled(&n->pmr.dev->mr, false);
            NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1);
            n->pmr.cmse = false;
        }

        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* PMRMSCL */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff);
        n->pmr.cmse = false;

        if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) {
            hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT;
            if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
                NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1);
                return;
            }

            n->pmr.cmse = true;
            n->pmr.cba = cba;
        }

        return;
    case 0xE18: /* PMRMSCU */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32);
        return;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr, size);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set, a read from PMRSTS should ensure that
         * prior writes made it to persistent media.
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
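
/*
 * Doorbell decoding below assumes the minimum doorbell stride (CAP.DSTRD = 0):
 * doorbells start at BAR offset 0x1000, with the SQ y tail doorbell at
 * 0x1000 + (2 * y) * 4 and the CQ y head doorbell at 0x1000 + (2 * y + 1) * 4.
 * For example, a write to 0x1008 is the tail doorbell of SQ 1 and a write to
 * 0x100c is the head doorbell of CQ 1 (illustrative note, not part of the
 * original source).
 */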
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data, size);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmb.buf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmb.buf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk) {
        warn_report("drive property is deprecated; "
                    "please use an nvme-ns device instead");
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (n->pmr.dev) {
        if (host_memory_backend_is_mapped(n->pmr.dev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmr.dev)));
            return;
        }

        if (!is_power_of_2(n->pmr.dev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2 in size");
            return;
        }

        host_memory_backend_set_mapped(n->pmr.dev, true);
    }

    if (n->params.zasl_bs) {
        if (!is_power_of_2(n->params.zasl_bs)) {
            error_setg(errp, "zone append size limit has to be a power of 2");
            return;
        }
    }
}
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = NVME_MAX_NAMESPACES;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    uint32_t nsid = nvme_nsid(ns);

    if (nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (!nsid) {
        for (int i = 1; i <= n->num_namespaces; i++) {
            if (!nvme_ns(n, i)) {
                nsid = ns->params.nsid = i;
                break;
            }
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return -1;
        }
    } else {
        if (n->namespaces[nsid - 1]) {
            error_setg(errp, "namespace id '%d' is already in use", nsid);
            return -1;
        }
    }

    trace_pci_nvme_register_namespace(nsid);

    n->namespaces[nsid - 1] = ns;

    return 0;
}
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    uint64_t cmb_size = n->params.cmb_size_mb * MiB;

    n->cmb.buf = g_malloc0(cmb_size);
    memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", cmb_size);
    pci_register_bar(pci_dev, NVME_CMB_BIR,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);

    NVME_CAP_SET_CMBS(n->bar.cap, 1);

    if (n->params.legacy_cmb) {
        nvme_cmb_enable_regs(n);
        n->cmb.cmse = true;
    }
}

static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);

    memory_region_set_enabled(&n->pmr.dev->mr, false);
}
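
/*
 * Summary of the BAR 0 layout built by nvme_init_pci() below (illustrative
 * note, not part of the original source): the controller register block comes
 * first (n->reg_size bytes, aligned up to 4 KiB), followed by the MSI-X table
 * and then the MSI-X PBA, each 4 KiB aligned; the final BAR size is rounded
 * up to a power of two, so the MSI-X structures share the same 64-bit memory
 * BAR as the registers (their offsets are passed to msix_init()).
 */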
static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;
    uint64_t bar_size, msix_table_size, msix_pba_size;
    unsigned msix_table_offset, msix_pba_offset;
    int ret;

    Error *err = NULL;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB);
    msix_table_offset = bar_size;
    msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize;

    bar_size += msix_table_size;
    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
    msix_pba_offset = bar_size;
    msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8;

    bar_size += msix_pba_size;
    bar_size = pow2ceil(bar_size);

    memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    memory_region_add_subregion(&n->bar0, 0, &n->iomem);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
    ret = msix_init(pci_dev, n->params.msix_qsize,
                    &n->bar0, 0, msix_table_offset,
                    &n->bar0, 0, msix_pba_offset, 0, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
        } else {
            error_propagate(errp, err);
            return ret;
        }
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

    if (n->pmr.dev) {
        nvme_init_pmr(n, pci_dev);
    }

    return 0;
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;
    char *subnqn;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(0);
    id->cntrltype = 0x1;

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it is
     * inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE);

    id->vwc = (0x2 << 1) | 0x1;
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
    strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
    g_free(subnqn);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
    NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
    NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    if (nvme_init_pci(n, pci_dev, errp)) {
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(ns, errp)) {
            return;
        }

        if (nvme_register_namespace(n, ns, errp)) {
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    int i;

    nvme_ctrl_reset(n);

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_cleanup(ns);
    }

    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmb.buf);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
    DEFINE_PROP_SIZE32("zoned.append_size_limit", NvmeCtrl, params.zasl_bs,
                       NVME_DEFAULT_MAX_ZA_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};
static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(n->bar.cap)) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event)
            nvme_smart_event(n, event);
    }
}
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    if (n->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)