 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *  -drive file=<file>,if=none,id=<drive_id>
 *  -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *  -device nvme,serial=<serial>,id=<bus_name>, \
 *      cmb_size_mb=<cmb_size_mb[optional]>, \
 *      [pmrdev=<mem_backend_file_id>,] \
 *      max_ioqpairs=<N[optional]>, \
 *      aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *      mdts=<N[optional]>,vsl=<N[optional]>, \
 *      zoned.zasl=<N[optional]>, \
 *      subsys=<subsys_id>
 *  -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *      zoned=<true|false[optional]>, \
 *      subsys=<subsys_id>,detached=<true|false[optional]>
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
 * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file:
 *
 *  -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *      size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 * To place controller(s) and namespace(s) in a subsystem, provide the
 * nvme-subsys device as above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables multi-controller capability, represented in the
 *   Identify Controller data structure in CMIC (Controller Multi-path I/O and
 *   Namespace Sharing Capabilities).
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
 * - `vsl`
 *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
 *   this value is specified as a power of two (2^n) and is in units of the
 *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
 *   KiB).
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 * - `zoned.append_size_limit`
 *   The maximum I/O size in bytes that is allowed in Zone Append command.
 *   The default is 128KiB. Since internally this value is maintained as
 *   ZASL = log2(<maximum append size> / <page size>), some values assigned
 *   to this property may be rounded down and result in a lower maximum ZA
 *   data size being in effect. By setting this property to 0, users can make
 *   ZASL equal to MDTS. This property only affects zoned namespaces.
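 *
 *   As a worked example of the formula above (illustrative values only): with
 *   a 4096 byte page size, zoned.append_size_limit=131072 yields
 *   ZASL = log2(131072 / 4096) = 5, and a value such as 200000 would be
 *   rounded down to the same ZASL of 5 (i.e. an effective 128KiB limit).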
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   If given, the namespace will be attached to all controllers in the
 *   subsystem. Otherwise, `bus` must be given to attach this namespace to a
 *   specific controller as a non-shared namespace.
 *
 * - `detached`
 *   This parameter is only valid together with the `subsys` parameter. If left
 *   at the default value (`false/off`), the namespace will be attached to all
 *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
 *   namespace will be available in the subsystem but not attached to any
 *   controller.
 * Setting `zoned` to true selects the Zoned Command Set at the namespace.
 * In this case, the following namespace properties are available to configure
 * zoned operation:
 *     zoned.zone_size=<zone size in bytes, default: 128MiB>
 *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *         The value 0 (default) forces zone capacity to be the same as zone
 *         size. The value of this property may not exceed zone size.
 *
 *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *         This value needs to be specified in 64B units. If it is zero,
 *         namespace(s) will not support zone descriptor extensions.
 *
 *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently active zones.
 *
 *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently open zones.
 *
 *     zoned.cross_read=<enable RAZB, default: false>
 *         Setting this property to true enables Read Across Zone Boundaries.
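 *
 * For example, an illustrative zoned setup (the parameter values below are
 * arbitrary examples, not recommendations) could look like:
 *
 *     -drive file=zns.img,if=none,id=zns0
 *     -device nvme,serial=deadbeef,id=nvme0
 *     -device nvme-ns,drive=zns0,bus=nvme0,nsid=1,zoned=true, \
 *         zoned.zone_size=64M,zoned.max_open=16,zoned.max_active=32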
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "nvme-dif.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010400
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 4
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
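
/*
 * Note: the temperature constants above are in Kelvin, as reported by NVMe
 * controllers; e.g. 0x143 is 323 K (50 degrees Celsius) and the critical
 * threshold 0x175 is 373 K (100 degrees Celsius).
 */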
#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};
static const uint32_t nvme_cse_acs[256] = {
    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
    [NVME_ADM_CMD_FORMAT_NVM]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};
static const uint32_t nvme_cse_iocs_none[256];
static const uint32_t nvme_cse_iocs_nvm[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_zoned[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
                                   NvmeZoneState state)
{
    if (QTAILQ_IN_USE(zone, entry)) {
        switch (nvme_get_zone_state(zone)) {
        case NVME_ZONE_STATE_EXPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_IMPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_CLOSED:
            QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_FULL:
            QTAILQ_REMOVE(&ns->full_zones, zone, entry);
            break;
        default:
            break;
        }
    }

    nvme_set_zone_state(zone, state);

    switch (state) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_CLOSED:
        QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_FULL:
        QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_READ_ONLY:
        break;
    default:
        break;
    }
}
/*
 * Check if we can open a zone without exceeding open/active limits.
 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
 */
static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
{
    if (ns->params.max_active_zones != 0 &&
        ns->nr_active_zones + act > ns->params.max_active_zones) {
        trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
        return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
    }
    if (ns->params.max_open_zones != 0 &&
        ns->nr_open_zones + opn > ns->params.max_open_zones) {
        trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
        return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi, lo;

    lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
    hi = lo + int128_get64(n->cmb.mem.size);

    return addr >= lo && addr < hi;
}
static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
    return &n->cmb.buf[addr - base];
}
static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi;

    hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);

    return addr >= n->pmr.cba && addr < hi;
}

static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
{
    return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
}
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(buf, nvme_addr_to_pmr(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}
static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(nvme_addr_to_cmb(n, addr), buf, size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(nvme_addr_to_pmr(n, addr), buf, size);
        return 0;
    }

    return pci_dma_write(&n->parent_obj, addr, buf, size);
}
static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
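
/*
 * The phase tag handling below follows the NVMe convention: every time the
 * completion queue tail wraps around, the controller inverts the phase bit it
 * writes into CQ entries, which is how the host tells new completions apart
 * from stale ones.
 */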
static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}
static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}
static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        }
        assert(cq->vector < 32);
        n->irq_status &= ~(1 << cq->vector);
        nvme_irq_check(n);
    }
}
static void nvme_req_clear(NvmeRequest *req)
{
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}
static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
{
    if (dma) {
        pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
        sg->flags = NVME_SG_DMA;
    } else {
        qemu_iovec_init(&sg->iov, 0);
    }

    sg->flags |= NVME_SG_ALLOC;
}
static inline void nvme_sg_unmap(NvmeSg *sg)
{
    if (!(sg->flags & NVME_SG_ALLOC)) {
        return;
    }

    if (sg->flags & NVME_SG_DMA) {
        qemu_sglist_destroy(&sg->qsg);
    } else {
        qemu_iovec_destroy(&sg->iov);
    }

    memset(sg, 0x0, sizeof(*sg));
}
/*
 * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
 * holds both data and metadata. This function splits the data and metadata
 * into two separate QSG/IOVs.
 */
static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
                          NvmeSg *mdata)
{
    NvmeSg *dst = data;
    size_t size = nvme_lsize(ns);
    size_t msize = nvme_msize(ns);
    uint32_t trans_len, count = size;
    uint64_t offset = 0;
    bool dma = sg->flags & NVME_SG_DMA;
    size_t sge_len;
    size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
    int sg_idx = 0;

    assert(sg->flags & NVME_SG_ALLOC);

    while (sg_len) {
        sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;

        trans_len = MIN(sg_len, count);
        trans_len = MIN(trans_len, sge_len - offset);

        if (dst) {
            if (dma) {
                qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
                                trans_len);
            } else {
                qemu_iovec_add(&dst->iov,
                               sg->iov.iov[sg_idx].iov_base + offset,
                               trans_len);
            }
        }

        sg_len -= trans_len;
        count -= trans_len;
        offset += trans_len;

        if (count == 0) {
            dst = (dst == data) ? mdata : data;
            count = (dst == data) ? size : msize;
        }

        if (sge_len == offset) {
            offset = 0;
            sg_idx++;
        }
    }
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}

static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);

    return NVME_SUCCESS;
}
static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
{
    bool cmb = false, pmr = false;

    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        cmb = true;
    } else if (nvme_addr_is_pmr(n, addr)) {
        pmr = true;
    }

    if (cmb || pmr) {
        if (sg->flags & NVME_SG_DMA) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        if (cmb) {
            return nvme_map_addr_cmb(n, &sg->iov, addr, len);
        } else {
            return nvme_map_addr_pmr(n, &sg->iov, addr, len);
        }
    }

    if (!(sg->flags & NVME_SG_DMA)) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    qemu_sglist_add(&sg->qsg, addr, len);

    return NVME_SUCCESS;
}

static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
{
    return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
}
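
/*
 * PRP mapping, per the NVMe specification: PRP1 covers the first (possibly
 * unaligned) page of the transfer. For transfers extending beyond a second
 * page, PRP2 points to a list of page-aligned PRP entries; otherwise PRP2 is
 * itself the second data page.
 */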
static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
                             uint64_t prp2, uint32_t len)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    int ret;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));

    status = nvme_map_addr(n, sg, prp1, trans_len);
    if (status) {
        goto unmap;
    }

    len -= trans_len;
    if (len) {
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        status = NVME_DATA_TRAS_ERROR;
                        goto unmap;
                    }

                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, sg, prp_ent, trans_len);
                if (status) {
                    goto unmap;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                goto unmap;
            }
            status = nvme_map_addr(n, sg, prp2, len);
            if (status) {
                goto unmap;
            }
        }
    }

    return NVME_SUCCESS;

unmap:
    nvme_sg_unmap(sg);
    return status;
}
/*
 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
 * number of bytes mapped in len.
 */
static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
                                  NvmeSglDescriptor *segment, uint64_t nsgld,
                                  size_t *len, NvmeCmd *cmd)
{
    dma_addr_t addr, trans_len;
    uint32_t dlen;
    uint16_t status;

    for (int i = 0; i < nsgld; i++) {
        uint8_t type = NVME_SGL_TYPE(segment[i].type);

        switch (type) {
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            if (cmd->opcode == NVME_CMD_WRITE) {
                continue;
            }
            /* fallthrough */
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
            break;
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
        default:
            return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
        }

        dlen = le32_to_cpu(segment[i].len);

        if (!dlen) {
            continue;
        }

        if (*len == 0) {
            /*
             * All data has been mapped, but the SGL contains additional
             * segments and/or descriptors. The controller might accept
             * ignoring the rest of the SGL.
             */
            uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
            if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
                break;
            }

            trace_pci_nvme_err_invalid_sgl_excess_length(dlen);
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        trans_len = MIN(*len, dlen);

        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
            goto next;
        }

        addr = le64_to_cpu(segment[i].addr);

        if (UINT64_MAX - addr < dlen) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        status = nvme_map_addr(n, sg, addr, trans_len);
        if (status) {
            return status;
        }

next:
        *len -= trans_len;
    }

    return NVME_SUCCESS;
}
static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
                             size_t len, NvmeCmd *cmd)
{
    /*
     * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
     * dynamically allocating a potentially huge SGL. The spec allows the SGL
     * to be larger (as in number of bytes required to describe the SGL
     * descriptors and segment chain) than the command transfer size, so it is
     * not bounded by MDTS.
     */
    const int SEG_CHUNK_SIZE = 256;

    NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
    uint64_t nsgld;
    uint32_t seg_len;
    uint16_t status;
    hwaddr addr;
    int ret;

    sgld = &sgl;
    addr = le64_to_cpu(sgl.addr);

    trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len);

    nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr));

    /*
     * If the entire transfer can be described with a single data block it can
     * be mapped directly.
     */
    if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
        status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd);
        if (status) {
            goto unmap;
        }

        goto out;
    }

    for (;;) {
        switch (NVME_SGL_TYPE(sgld->type)) {
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            break;
        default:
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        seg_len = le32_to_cpu(sgld->len);

        /* check the length of the (Last) Segment descriptor */
        if ((!seg_len || seg_len & 0xf) &&
            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        if (UINT64_MAX - addr < seg_len) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        nsgld = seg_len / sizeof(NvmeSglDescriptor);

        while (nsgld > SEG_CHUNK_SIZE) {
            if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
                trace_pci_nvme_err_addr_read(addr);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }

            status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE,
                                       &len, cmd);
            if (status) {
                goto unmap;
            }

            nsgld -= SEG_CHUNK_SIZE;
            addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
        }

        ret = nvme_addr_read(n, addr, segment, nsgld *
                             sizeof(NvmeSglDescriptor));
        if (ret) {
            trace_pci_nvme_err_addr_read(addr);
            status = NVME_DATA_TRAS_ERROR;
            goto unmap;
        }

        last_sgld = &segment[nsgld - 1];

        /*
         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
         * then we are done.
         */
        switch (NVME_SGL_TYPE(last_sgld->type)) {
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
            if (status) {
                goto unmap;
            }

            goto out;
        default:
            break;
        }

        /*
         * If the last descriptor was not a Data Block or Bit Bucket, then the
         * current segment must not be a Last Segment.
         */
        if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
            status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
            goto unmap;
        }

        sgld = last_sgld;
        addr = le64_to_cpu(sgld->addr);

        /*
         * Do not map the last descriptor; it will be a Segment or Last Segment
         * descriptor and is handled by the next iteration.
         */
        status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd);
        if (status) {
            goto unmap;
        }
    }

out:
    /* if there is any residual left in len, the SGL was too short */
    if (len) {
        status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        goto unmap;
    }

    return NVME_SUCCESS;

unmap:
    nvme_sg_unmap(sg);
    return status;
}
*n
, NvmeSg
*sg
, size_t len
,
951 switch (NVME_CMD_FLAGS_PSDT(cmd
->flags
)) {
953 prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
954 prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
956 return nvme_map_prp(n
, sg
, prp1
, prp2
, len
);
957 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
958 case NVME_PSDT_SGL_MPTR_SGL
:
959 return nvme_map_sgl(n
, sg
, cmd
->dptr
.sgl
, len
, cmd
);
961 return NVME_INVALID_FIELD
;
965 static uint16_t nvme_map_mptr(NvmeCtrl
*n
, NvmeSg
*sg
, size_t len
,
968 int psdt
= NVME_CMD_FLAGS_PSDT(cmd
->flags
);
969 hwaddr mptr
= le64_to_cpu(cmd
->mptr
);
972 if (psdt
== NVME_PSDT_SGL_MPTR_SGL
) {
973 NvmeSglDescriptor sgl
;
975 if (nvme_addr_read(n
, mptr
, &sgl
, sizeof(sgl
))) {
976 return NVME_DATA_TRAS_ERROR
;
979 status
= nvme_map_sgl(n
, sg
, sgl
, len
, cmd
);
980 if (status
&& (status
& 0x7ff) == NVME_DATA_SGL_LEN_INVALID
) {
981 status
= NVME_MD_SGL_LEN_INVALID
| NVME_DNR
;
987 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, mptr
));
988 status
= nvme_map_addr(n
, sg
, mptr
, len
);
996 static uint16_t nvme_map_data(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
998 NvmeNamespace
*ns
= req
->ns
;
999 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1000 uint16_t ctrl
= le16_to_cpu(rw
->control
);
1001 size_t len
= nvme_l2b(ns
, nlb
);
1004 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
) &&
1005 (ctrl
& NVME_RW_PRINFO_PRACT
&& nvme_msize(ns
) == 8)) {
1009 if (nvme_ns_ext(ns
)) {
1012 len
+= nvme_m2b(ns
, nlb
);
1014 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1019 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1020 nvme_sg_split(&sg
, ns
, &req
->sg
, NULL
);
1023 return NVME_SUCCESS
;
1027 return nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1030 static uint16_t nvme_map_mdata(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
1032 NvmeNamespace
*ns
= req
->ns
;
1033 size_t len
= nvme_m2b(ns
, nlb
);
1036 if (nvme_ns_ext(ns
)) {
1039 len
+= nvme_l2b(ns
, nlb
);
1041 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1046 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1047 nvme_sg_split(&sg
, ns
, NULL
, &req
->sg
);
1050 return NVME_SUCCESS
;
1053 return nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1056 static uint16_t nvme_tx_interleaved(NvmeCtrl
*n
, NvmeSg
*sg
, uint8_t *ptr
,
1057 uint32_t len
, uint32_t bytes
,
1058 int32_t skip_bytes
, int64_t offset
,
1059 NvmeTxDirection dir
)
1062 uint32_t trans_len
, count
= bytes
;
1063 bool dma
= sg
->flags
& NVME_SG_DMA
;
1068 assert(sg
->flags
& NVME_SG_ALLOC
);
1071 sge_len
= dma
? sg
->qsg
.sg
[sg_idx
].len
: sg
->iov
.iov
[sg_idx
].iov_len
;
1073 if (sge_len
- offset
< 0) {
1079 if (sge_len
== offset
) {
1085 trans_len
= MIN(len
, count
);
1086 trans_len
= MIN(trans_len
, sge_len
- offset
);
1089 addr
= sg
->qsg
.sg
[sg_idx
].base
+ offset
;
1091 addr
= (hwaddr
)(uintptr_t)sg
->iov
.iov
[sg_idx
].iov_base
+ offset
;
1094 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1095 ret
= nvme_addr_read(n
, addr
, ptr
, trans_len
);
1097 ret
= nvme_addr_write(n
, addr
, ptr
, trans_len
);
1101 return NVME_DATA_TRAS_ERROR
;
1107 offset
+= trans_len
;
1111 offset
+= skip_bytes
;
1115 return NVME_SUCCESS
;
1118 static uint16_t nvme_tx(NvmeCtrl
*n
, NvmeSg
*sg
, uint8_t *ptr
, uint32_t len
,
1119 NvmeTxDirection dir
)
1121 assert(sg
->flags
& NVME_SG_ALLOC
);
1123 if (sg
->flags
& NVME_SG_DMA
) {
1126 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1127 residual
= dma_buf_write(ptr
, len
, &sg
->qsg
);
1129 residual
= dma_buf_read(ptr
, len
, &sg
->qsg
);
1132 if (unlikely(residual
)) {
1133 trace_pci_nvme_err_invalid_dma();
1134 return NVME_INVALID_FIELD
| NVME_DNR
;
1139 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1140 bytes
= qemu_iovec_to_buf(&sg
->iov
, 0, ptr
, len
);
1142 bytes
= qemu_iovec_from_buf(&sg
->iov
, 0, ptr
, len
);
1145 if (unlikely(bytes
!= len
)) {
1146 trace_pci_nvme_err_invalid_dma();
1147 return NVME_INVALID_FIELD
| NVME_DNR
;
1151 return NVME_SUCCESS
;
1154 static inline uint16_t nvme_c2h(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1159 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1164 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_FROM_DEVICE
);
1167 static inline uint16_t nvme_h2c(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1172 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1177 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_TO_DEVICE
);
1180 uint16_t nvme_bounce_data(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1181 NvmeTxDirection dir
, NvmeRequest
*req
)
1183 NvmeNamespace
*ns
= req
->ns
;
1184 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1185 uint16_t ctrl
= le16_to_cpu(rw
->control
);
1187 if (nvme_ns_ext(ns
) &&
1188 !(ctrl
& NVME_RW_PRINFO_PRACT
&& nvme_msize(ns
) == 8)) {
1189 size_t lsize
= nvme_lsize(ns
);
1190 size_t msize
= nvme_msize(ns
);
1192 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, lsize
, msize
, 0,
1196 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1199 uint16_t nvme_bounce_mdata(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1200 NvmeTxDirection dir
, NvmeRequest
*req
)
1202 NvmeNamespace
*ns
= req
->ns
;
1205 if (nvme_ns_ext(ns
)) {
1206 size_t lsize
= nvme_lsize(ns
);
1207 size_t msize
= nvme_msize(ns
);
1209 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, msize
, lsize
, lsize
,
1213 nvme_sg_unmap(&req
->sg
);
1215 status
= nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1220 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1223 static inline void nvme_blk_read(BlockBackend
*blk
, int64_t offset
,
1224 BlockCompletionFunc
*cb
, NvmeRequest
*req
)
1226 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1228 if (req
->sg
.flags
& NVME_SG_DMA
) {
1229 req
->aiocb
= dma_blk_read(blk
, &req
->sg
.qsg
, offset
, BDRV_SECTOR_SIZE
,
1232 req
->aiocb
= blk_aio_preadv(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1236 static inline void nvme_blk_write(BlockBackend
*blk
, int64_t offset
,
1237 BlockCompletionFunc
*cb
, NvmeRequest
*req
)
1239 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1241 if (req
->sg
.flags
& NVME_SG_DMA
) {
1242 req
->aiocb
= dma_blk_write(blk
, &req
->sg
.qsg
, offset
, BDRV_SECTOR_SIZE
,
1245 req
->aiocb
= blk_aio_pwritev(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1249 static void nvme_post_cqes(void *opaque
)
1251 NvmeCQueue
*cq
= opaque
;
1252 NvmeCtrl
*n
= cq
->ctrl
;
1253 NvmeRequest
*req
, *next
;
1256 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
1260 if (nvme_cq_full(cq
)) {
1265 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
1266 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
1267 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
1268 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
1269 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
1272 trace_pci_nvme_err_addr_write(addr
);
1273 trace_pci_nvme_err_cfs();
1274 n
->bar
.csts
= NVME_CSTS_FAILED
;
1277 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
1278 nvme_inc_cq_tail(cq
);
1279 nvme_sg_unmap(&req
->sg
);
1280 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
1282 if (cq
->tail
!= cq
->head
) {
1283 nvme_irq_assert(n
, cq
);
1287 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
1289 assert(cq
->cqid
== req
->sq
->cqid
);
1290 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
1294 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
1295 req
->status
, req
->cmd
.opcode
);
1298 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
1299 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
1300 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
1303 static void nvme_process_aers(void *opaque
)
1305 NvmeCtrl
*n
= opaque
;
1306 NvmeAsyncEvent
*event
, *next
;
1308 trace_pci_nvme_process_aers(n
->aer_queued
);
1310 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
1312 NvmeAerResult
*result
;
1314 /* can't post cqe if there is nothing to complete */
1315 if (!n
->outstanding_aers
) {
1316 trace_pci_nvme_no_outstanding_aers();
1320 /* ignore if masked (cqe posted, but event not cleared) */
1321 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
1322 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
1326 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
1329 n
->aer_mask
|= 1 << event
->result
.event_type
;
1330 n
->outstanding_aers
--;
1332 req
= n
->aer_reqs
[n
->outstanding_aers
];
1334 result
= (NvmeAerResult
*) &req
->cqe
.result
;
1335 result
->event_type
= event
->result
.event_type
;
1336 result
->event_info
= event
->result
.event_info
;
1337 result
->log_page
= event
->result
.log_page
;
1340 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
1343 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
1347 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
1348 uint8_t event_info
, uint8_t log_page
)
1350 NvmeAsyncEvent
*event
;
1352 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
1354 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
1355 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
1359 event
= g_new(NvmeAsyncEvent
, 1);
1360 event
->result
= (NvmeAerResult
) {
1361 .event_type
= event_type
,
1362 .event_info
= event_info
,
1363 .log_page
= log_page
,
1366 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
1369 nvme_process_aers(n
);
1372 static void nvme_smart_event(NvmeCtrl
*n
, uint8_t event
)
1376 /* Ref SPEC <Asynchronous Event Information 0x2013 SMART / Health Status> */
1377 if (!(NVME_AEC_SMART(n
->features
.async_config
) & event
)) {
1382 case NVME_SMART_SPARE
:
1383 aer_info
= NVME_AER_INFO_SMART_SPARE_THRESH
;
1385 case NVME_SMART_TEMPERATURE
:
1386 aer_info
= NVME_AER_INFO_SMART_TEMP_THRESH
;
1388 case NVME_SMART_RELIABILITY
:
1389 case NVME_SMART_MEDIA_READ_ONLY
:
1390 case NVME_SMART_FAILED_VOLATILE_MEDIA
:
1391 case NVME_SMART_PMR_UNRELIABLE
:
1392 aer_info
= NVME_AER_INFO_SMART_RELIABILITY
;
1398 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
, aer_info
, NVME_LOG_SMART_INFO
);
1401 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
1403 n
->aer_mask
&= ~(1 << event_type
);
1404 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1405 nvme_process_aers(n
);
1409 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
1411 uint8_t mdts
= n
->params
.mdts
;
1413 if (mdts
&& len
> n
->page_size
<< mdts
) {
1414 trace_pci_nvme_err_mdts(len
);
1415 return NVME_INVALID_FIELD
| NVME_DNR
;
1418 return NVME_SUCCESS
;
static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba,
                                         uint32_t nlb)
{
    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);

    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
                                 uint32_t nlb)
{
    BlockDriverState *bs = blk_bs(ns->blkconf.blk);

    int64_t pnum = 0, bytes = nvme_l2b(ns, nlb);
    int64_t offset = nvme_l2b(ns, slba);
    bool zeroed;
    int ret;

    Error *local_err = NULL;

    /*
     * `pnum` holds the number of bytes after offset that share the same
     * allocation status as the byte at offset. If `pnum` is different from
     * `bytes`, we should check the allocation status of the next range and
     * continue this until all bytes have been checked.
     */
    do {
        bytes -= pnum;

        ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
        if (ret < 0) {
            error_setg_errno(&local_err, -ret, "unable to get block status");
            error_report_err(local_err);

            return NVME_INTERNAL_DEV_ERROR;
        }

        zeroed = !!(ret & BDRV_BLOCK_ZERO);

        trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed);

        if (zeroed) {
            return NVME_DULB;
        }

        offset += pnum;
    } while (pnum != bytes);

    return NVME_SUCCESS;
}
*req
, int ret
)
1478 uint16_t status
= NVME_SUCCESS
;
1479 Error
*local_err
= NULL
;
1481 switch (req
->cmd
.opcode
) {
1483 status
= NVME_UNRECOVERED_READ
;
1485 case NVME_CMD_FLUSH
:
1486 case NVME_CMD_WRITE
:
1487 case NVME_CMD_WRITE_ZEROES
:
1488 case NVME_CMD_ZONE_APPEND
:
1489 status
= NVME_WRITE_FAULT
;
1492 status
= NVME_INTERNAL_DEV_ERROR
;
1496 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(-ret
), status
);
1498 error_setg_errno(&local_err
, -ret
, "aio failed");
1499 error_report_err(local_err
);
1502 * Set the command status code to the first encountered error but allow a
1503 * subsequent Internal Device Error to trump it.
1505 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
1509 req
->status
= status
;
1512 static inline uint32_t nvme_zone_idx(NvmeNamespace
*ns
, uint64_t slba
)
1514 return ns
->zone_size_log2
> 0 ? slba
>> ns
->zone_size_log2
:
1515 slba
/ ns
->zone_size
;
1518 static inline NvmeZone
*nvme_get_zone_by_slba(NvmeNamespace
*ns
, uint64_t slba
)
1520 uint32_t zone_idx
= nvme_zone_idx(ns
, slba
);
1522 assert(zone_idx
< ns
->num_zones
);
1523 return &ns
->zone_array
[zone_idx
];
1526 static uint16_t nvme_check_zone_state_for_write(NvmeZone
*zone
)
1528 uint64_t zslba
= zone
->d
.zslba
;
1530 switch (nvme_get_zone_state(zone
)) {
1531 case NVME_ZONE_STATE_EMPTY
:
1532 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1533 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1534 case NVME_ZONE_STATE_CLOSED
:
1535 return NVME_SUCCESS
;
1536 case NVME_ZONE_STATE_FULL
:
1537 trace_pci_nvme_err_zone_is_full(zslba
);
1538 return NVME_ZONE_FULL
;
1539 case NVME_ZONE_STATE_OFFLINE
:
1540 trace_pci_nvme_err_zone_is_offline(zslba
);
1541 return NVME_ZONE_OFFLINE
;
1542 case NVME_ZONE_STATE_READ_ONLY
:
1543 trace_pci_nvme_err_zone_is_read_only(zslba
);
1544 return NVME_ZONE_READ_ONLY
;
1549 return NVME_INTERNAL_DEV_ERROR
;
static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone,
                                      uint64_t slba, uint32_t nlb)
{
    uint64_t zcap = nvme_zone_wr_boundary(zone);
    uint16_t status;

    status = nvme_check_zone_state_for_write(zone);
    if (status) {
        return status;
    }

    if (unlikely(slba != zone->w_ptr)) {
        trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr);
        return NVME_ZONE_INVALID_WRITE;
    }

    if (unlikely((slba + nlb) > zcap)) {
        trace_pci_nvme_err_zone_boundary(slba, nlb, zcap);
        return NVME_ZONE_BOUNDARY_ERROR;
    }

    return NVME_SUCCESS;
}
static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_EMPTY:
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
    case NVME_ZONE_STATE_FULL:
    case NVME_ZONE_STATE_CLOSED:
    case NVME_ZONE_STATE_READ_ONLY:
        return NVME_SUCCESS;
    case NVME_ZONE_STATE_OFFLINE:
        trace_pci_nvme_err_zone_is_offline(zone->d.zslba);
        return NVME_ZONE_OFFLINE;
    default:
        break;
    }

    return NVME_INTERNAL_DEV_ERROR;
}
*ns
, uint64_t slba
,
1599 NvmeZone
*zone
= nvme_get_zone_by_slba(ns
, slba
);
1600 uint64_t bndry
= nvme_zone_rd_boundary(ns
, zone
);
1601 uint64_t end
= slba
+ nlb
;
1604 status
= nvme_check_zone_state_for_read(zone
);
1607 } else if (unlikely(end
> bndry
)) {
1608 if (!ns
->params
.cross_zone_read
) {
1609 status
= NVME_ZONE_BOUNDARY_ERROR
;
1612 * Read across zone boundary - check that all subsequent
1613 * zones that are being read have an appropriate state.
1617 status
= nvme_check_zone_state_for_read(zone
);
1621 } while (end
> nvme_zone_rd_boundary(ns
, zone
));
1628 static uint16_t nvme_zrm_finish(NvmeNamespace
*ns
, NvmeZone
*zone
)
1630 switch (nvme_get_zone_state(zone
)) {
1631 case NVME_ZONE_STATE_FULL
:
1632 return NVME_SUCCESS
;
1634 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1635 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1636 nvme_aor_dec_open(ns
);
1638 case NVME_ZONE_STATE_CLOSED
:
1639 nvme_aor_dec_active(ns
);
1641 case NVME_ZONE_STATE_EMPTY
:
1642 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1643 return NVME_SUCCESS
;
1646 return NVME_ZONE_INVAL_TRANSITION
;
static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        nvme_aor_dec_open(ns);
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
        /* fallthrough */
    case NVME_ZONE_STATE_CLOSED:
        return NVME_SUCCESS;

    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}
static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
{
    NvmeZone *zone;

    if (ns->params.max_open_zones &&
        ns->nr_open_zones == ns->params.max_open_zones) {
        zone = QTAILQ_FIRST(&ns->imp_open_zones);
        if (zone) {
            /*
             * Automatically close this implicitly open zone.
             */
            QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
            nvme_zrm_close(ns, zone);
        }
    }
}
*ns
, NvmeZone
*zone
,
1689 switch (nvme_get_zone_state(zone
)) {
1690 case NVME_ZONE_STATE_EMPTY
:
1695 case NVME_ZONE_STATE_CLOSED
:
1696 nvme_zrm_auto_transition_zone(ns
);
1697 status
= nvme_aor_check(ns
, act
, 1);
1703 nvme_aor_inc_active(ns
);
1706 nvme_aor_inc_open(ns
);
1709 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_IMPLICITLY_OPEN
);
1710 return NVME_SUCCESS
;
1715 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1717 return NVME_SUCCESS
;
1720 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EXPLICITLY_OPEN
);
1724 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1725 return NVME_SUCCESS
;
1728 return NVME_ZONE_INVAL_TRANSITION
;
static inline uint16_t nvme_zrm_auto(NvmeNamespace *ns, NvmeZone *zone)
{
    return __nvme_zrm_open(ns, zone, true);
}

static inline uint16_t nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone)
{
    return __nvme_zrm_open(ns, zone, false);
}
static void __nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
                                   uint32_t nlb)
{
    zone->d.wp += nlb;

    if (zone->d.wp == nvme_zone_wr_boundary(zone)) {
        nvme_zrm_finish(ns, zone);
    }
}
static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeZone *zone;
    uint64_t slba;
    uint32_t nlb;

    slba = le64_to_cpu(rw->slba);
    nlb = le16_to_cpu(rw->nlb) + 1;
    zone = nvme_get_zone_by_slba(ns, slba);

    __nvme_advance_zone_wp(ns, zone, nlb);
}
static inline bool nvme_is_write(NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;

    return rw->opcode == NVME_CMD_WRITE ||
           rw->opcode == NVME_CMD_ZONE_APPEND ||
           rw->opcode == NVME_CMD_WRITE_ZEROES;
}
static void nvme_misc_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;

    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    trace_pci_nvme_misc_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
    } else {
        block_acct_done(stats, acct);
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
void nvme_rw_complete_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
    } else {
        block_acct_done(stats, acct);
    }

    if (ns->params.zoned && nvme_is_write(req)) {
        nvme_finalize_zoned_write(ns, req);
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;

    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    if (nvme_msize(ns)) {
        NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
        uint64_t slba = le64_to_cpu(rw->slba);
        uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
        uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);

        if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) {
            size_t mlen = nvme_m2b(ns, nlb);

            req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen,
                                               BDRV_REQ_MAY_UNMAP,
                                               nvme_rw_complete_cb, req);
            return;
        }

        if (nvme_ns_ext(ns) || req->cmd.mptr) {
            uint16_t status;

            nvme_sg_unmap(&req->sg);
            status = nvme_map_mdata(nvme_ctrl(req), nlb, req);
            if (status) {
                ret = -EFAULT;
                goto out;
            }

            if (req->cmd.opcode == NVME_CMD_READ) {
                return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
            }

            return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
        }
    }

out:
    nvme_rw_complete_cb(req, ret);
}
struct nvme_aio_format_ctx {
    NvmeRequest   *req;
    NvmeNamespace *ns;

    /* number of outstanding write zeroes for this namespace */
    int *count;
};

static void nvme_aio_format_cb(void *opaque, int ret)
{
    struct nvme_aio_format_ctx *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = ctx->ns;
    uintptr_t *num_formats = (uintptr_t *)&req->opaque;
    int *count = ctx->count;

    g_free(ctx);

    if (ret) {
        nvme_aio_err(req, ret);
    }

    if (--(*count)) {
        return;
    }

    g_free(count);
    ns->status = 0x0;

    if (--(*num_formats)) {
        return;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
struct nvme_aio_flush_ctx {
    NvmeRequest     *req;
    NvmeNamespace   *ns;
    BlockAcctCookie acct;
};

static void nvme_aio_flush_cb(void *opaque, int ret)
{
    struct nvme_aio_flush_ctx *ctx = opaque;
    NvmeRequest *req = ctx->req;
    uintptr_t *num_flushes = (uintptr_t *)&req->opaque;

    BlockBackend *blk = ctx->ns->blkconf.blk;
    BlockAcctCookie *acct = &ctx->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    trace_pci_nvme_aio_flush_cb(nvme_cid(req), blk_name(blk));

    if (!ret) {
        block_acct_done(stats, acct);
    } else {
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
    }

    (*num_flushes)--;
    g_free(ctx);

    if (*num_flushes) {
        return;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_verify_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint32_t reftag = le32_to_cpu(rw->reftag);
    uint16_t status;

    trace_pci_nvme_verify_cb(nvme_cid(req), NVME_RW_PRINFO(ctrl), apptag,
                             appmask, reftag);

    if (ret) {
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
        goto out;
    }

    block_acct_done(stats, acct);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce,
                                       ctx->mdata.iov.size, slba);
        if (status) {
            req->status = status;
            goto out;
        }

        req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                                     ctx->mdata.bounce, ctx->mdata.iov.size,
                                     ctrl, slba, apptag, appmask, reftag);
    }

out:
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_verify_mdata_in_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    size_t mlen = nvme_m2b(ns, nlb);
    uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    ctx->mdata.bounce = g_malloc(mlen);

    qemu_iovec_reset(&ctx->mdata.iov);
    qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

    req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
                                nvme_verify_cb, ctx);
    return;

out:
    nvme_verify_cb(ctx, ret);
}
static void nvme_aio_discard_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    uintptr_t *discards = (uintptr_t *)&req->opaque;

    trace_pci_nvme_aio_discard_cb(nvme_cid(req));

    if (ret) {
        nvme_aio_err(req, ret);
    }

    (*discards)--;

    if (*discards) {
        return;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
struct nvme_zone_reset_ctx {
    NvmeRequest *req;
    NvmeZone    *zone;
};

static void nvme_aio_zone_reset_complete_cb(void *opaque, int ret)
{
    struct nvme_zone_reset_ctx *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeZone *zone = ctx->zone;
    uintptr_t *resets = (uintptr_t *)&req->opaque;

    if (ret) {
        nvme_aio_err(req, ret);
        goto out;
    }

    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        nvme_aor_dec_open(ns);
        /* fallthrough */
    case NVME_ZONE_STATE_CLOSED:
        nvme_aor_dec_active(ns);
        /* fallthrough */
    case NVME_ZONE_STATE_FULL:
        zone->w_ptr = zone->d.zslba;
        zone->d.wp = zone->w_ptr;
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY);
        /* fallthrough */
    default:
        break;
    }

out:
    g_free(ctx);

    (*resets)--;

    if (*resets) {
        return;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_aio_zone_reset_cb(void *opaque, int ret)
{
    struct nvme_zone_reset_ctx *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeZone *zone = ctx->zone;

    trace_pci_nvme_aio_zone_reset_cb(nvme_cid(req), zone->d.zslba);

    if (ret) {
        goto out;
    }

    if (nvme_msize(ns)) {
        int64_t offset = ns->mdata_offset + nvme_m2b(ns, zone->d.zslba);

        blk_aio_pwrite_zeroes(ns->blkconf.blk, offset,
                              nvme_m2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP,
                              nvme_aio_zone_reset_complete_cb, ctx);
        return;
    }

out:
    nvme_aio_zone_reset_complete_cb(opaque, ret);
}
struct nvme_copy_ctx {
    int copies;
    uint8_t *bounce;
    uint8_t *mbounce;
    uint32_t nlb;
    NvmeCopySourceRange *ranges;
};

struct nvme_copy_in_ctx {
    NvmeRequest *req;
    QEMUIOVector iov;
    NvmeCopySourceRange *range;
};
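
/*
 * The Copy command runs as a three-stage pipeline: the source ranges are
 * first read into the bounce buffers (nvme_aio_copy_in_cb), the gathered data
 * is checked and written to the destination (nvme_copy_in_complete and
 * nvme_copy_cb), and the final callback below finishes accounting and, for
 * zoned namespaces, advances the destination zone's write pointer.
 */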
static void nvme_copy_complete_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;
    struct nvme_copy_ctx *ctx = req->opaque;

    if (ret) {
        block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
        nvme_aio_err(req, ret);
        goto out;
    }

    block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);

    if (ns->params.zoned) {
        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
        uint64_t sdlba = le64_to_cpu(copy->sdlba);
        NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);

        __nvme_advance_zone_wp(ns, zone, ctx->nlb);
    }

out:
    g_free(ctx->bounce);
    g_free(ctx->mbounce);
    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_copy_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;
    struct nvme_copy_ctx *ctx = req->opaque;

    trace_pci_nvme_copy_cb(nvme_cid(req));

    if (ret) {
        goto out;
    }

    if (nvme_msize(ns)) {
        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
        uint64_t sdlba = le64_to_cpu(copy->sdlba);
        int64_t offset = ns->mdata_offset + nvme_m2b(ns, sdlba);

        qemu_iovec_reset(&req->sg.iov);
        qemu_iovec_add(&req->sg.iov, ctx->mbounce, nvme_m2b(ns, ctx->nlb));

        req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &req->sg.iov, 0,
                                     nvme_copy_complete_cb, req);
        return;
    }

out:
    nvme_copy_complete_cb(opaque, ret);
}
static void nvme_copy_in_complete(NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
    struct nvme_copy_ctx *ctx = req->opaque;
    uint64_t sdlba = le64_to_cpu(copy->sdlba);
    uint16_t status;

    trace_pci_nvme_copy_in_complete(nvme_cid(req));

    block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        uint16_t prinfor = (copy->control[0] >> 4) & 0xf;
        uint16_t prinfow = (copy->control[2] >> 2) & 0xf;
        uint16_t nr = copy->nr + 1;
        NvmeCopySourceRange *range;
        uint64_t slba;
        uint32_t nlb;
        uint16_t apptag, appmask;
        uint32_t reftag;
        uint8_t *buf = ctx->bounce, *mbuf = ctx->mbounce;
        size_t len, mlen;
        int i;

        /*
         * The dif helpers expect prinfo to be similar to the control field of
         * the NvmeRwCmd, so shift by 10 to fake it.
         */
        prinfor = prinfor << 10;
        prinfow = prinfow << 10;

        for (i = 0; i < nr; i++) {
            range = &ctx->ranges[i];
            slba = le64_to_cpu(range->slba);
            nlb = le16_to_cpu(range->nlb) + 1;
            len = nvme_l2b(ns, nlb);
            mlen = nvme_m2b(ns, nlb);
            apptag = le16_to_cpu(range->apptag);
            appmask = le16_to_cpu(range->appmask);
            reftag = le32_to_cpu(range->reftag);

            status = nvme_dif_check(ns, buf, len, mbuf, mlen, prinfor, slba,
                                    apptag, appmask, reftag);
            if (status) {
                goto invalid;
            }

            buf += len;
            mbuf += mlen;
        }

        apptag = le16_to_cpu(copy->apptag);
        appmask = le16_to_cpu(copy->appmask);
        reftag = le32_to_cpu(copy->reftag);

        if (prinfow & NVME_RW_PRINFO_PRACT) {
            size_t len = nvme_l2b(ns, ctx->nlb);
            size_t mlen = nvme_m2b(ns, ctx->nlb);

            status = nvme_check_prinfo(ns, prinfow, sdlba, reftag);
            if (status) {
                goto invalid;
            }

            nvme_dif_pract_generate_dif(ns, ctx->bounce, len, ctx->mbounce,
                                        mlen, apptag, reftag);
        } else {
            len = nvme_l2b(ns, ctx->nlb);
            mlen = nvme_m2b(ns, ctx->nlb);

            status = nvme_dif_check(ns, ctx->bounce, len, ctx->mbounce, mlen,
                                    prinfow, sdlba, apptag, appmask, reftag);
            if (status) {
                goto invalid;
            }
        }
    }

    status = nvme_check_bounds(ns, sdlba, ctx->nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(sdlba, ctx->nlb, ns->id_ns.nsze);
        goto invalid;
    }

    if (ns->params.zoned) {
        NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);

        status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb);
        if (status) {
            goto invalid;
        }

        status = nvme_zrm_auto(ns, zone);
        if (status) {
            goto invalid;
        }

        zone->w_ptr += ctx->nlb;
    }

    qemu_iovec_init(&req->sg.iov, 1);
    qemu_iovec_add(&req->sg.iov, ctx->bounce, nvme_l2b(ns, ctx->nlb));

    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);

    req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
                                 &req->sg.iov, 0, nvme_copy_cb, req);

    return;

invalid:
    req->status = status;

    g_free(ctx->bounce);
    g_free(ctx->mbounce);
    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_aio_copy_in_cb(void *opaque, int ret)
{
    struct nvme_copy_in_ctx *in_ctx = opaque;
    NvmeRequest *req = in_ctx->req;
    NvmeNamespace *ns = req->ns;
    struct nvme_copy_ctx *ctx = req->opaque;

    qemu_iovec_destroy(&in_ctx->iov);
    g_free(in_ctx);

    trace_pci_nvme_aio_copy_in_cb(nvme_cid(req));

    if (ret) {
        nvme_aio_err(req, ret);
    }

    ctx->copies--;

    if (ctx->copies) {
        return;
    }

    if (req->status) {
        block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);

        g_free(ctx->bounce);
        g_free(ctx->mbounce);
        g_free(ctx);

        nvme_enqueue_req_completion(nvme_cq(req), req);

        return;
    }

    nvme_copy_in_complete(req);
}
struct nvme_compare_ctx {
    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } mdata;
};
static void nvme_compare_mdata_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;
    NvmeCtrl *n = nvme_ctrl(req);
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint32_t reftag = le32_to_cpu(rw->reftag);
    struct nvme_compare_ctx *ctx = req->opaque;
    g_autofree uint8_t *buf = NULL;
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_compare_mdata_cb(nvme_cid(req));

    buf = g_malloc(ctx->mdata.iov.size);

    status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size,
                               NVME_TX_DIRECTION_TO_DEVICE, req);
    if (status) {
        req->status = status;
        goto out;
    }

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        uint64_t slba = le64_to_cpu(rw->slba);
        uint8_t *bufp;
        uint8_t *mbufp = ctx->mdata.bounce;
        uint8_t *end = mbufp + ctx->mdata.iov.size;
        size_t msize = nvme_msize(ns);
        int16_t pil = 0;

        status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                                ctx->mdata.bounce, ctx->mdata.iov.size, ctrl,
                                slba, apptag, appmask, reftag);
        if (status) {
            req->status = status;
            goto out;
        }

        /*
         * When formatted with protection information, do not compare the DIF
         * tuple.
         */
        if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
            pil = nvme_msize(ns) - sizeof(NvmeDifTuple);
        }

        for (bufp = buf; mbufp < end; bufp += msize, mbufp += msize) {
            if (memcmp(bufp + pil, mbufp + pil, msize - pil)) {
                req->status = NVME_CMP_FAILURE;
                goto out;
            }
        }

        goto out;
    }

    if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
        req->status = NVME_CMP_FAILURE;
    }

out:
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
static void nvme_compare_data_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeCtrl *n = nvme_ctrl(req);
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    struct nvme_compare_ctx *ctx = req->opaque;
    g_autofree uint8_t *buf = NULL;
    uint16_t status;

    trace_pci_nvme_compare_data_cb(nvme_cid(req));

    if (ret) {
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
        goto out;
    }

    buf = g_malloc(ctx->data.iov.size);

    status = nvme_bounce_data(n, buf, ctx->data.iov.size,
                              NVME_TX_DIRECTION_TO_DEVICE, req);
    if (status) {
        req->status = status;
        goto out;
    }

    if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) {
        req->status = NVME_CMP_FAILURE;
        goto out;
    }

    if (nvme_msize(ns)) {
        NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
        uint64_t slba = le64_to_cpu(rw->slba);
        uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
        size_t mlen = nvme_m2b(ns, nlb);
        uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);

        ctx->mdata.bounce = g_malloc(mlen);

        qemu_iovec_init(&ctx->mdata.iov, 1);
        qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

        req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
                                    nvme_compare_mdata_cb, req);
        return;
    }

    block_acct_done(stats, acct);

out:
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);
    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
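/*
 * Flow note (summarizing the two callbacks above): Compare is resolved in up
 * to two phases. nvme_compare_data_cb() checks the host-supplied data
 * against the bounce buffer read from the backing device; if the namespace
 * carries metadata, it chains a second read of the metadata region and the
 * comparison (and any DIF checking) finishes in nvme_compare_mdata_cb().
 */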
static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd;

    uint32_t attr = le32_to_cpu(dsm->attributes);
    uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1;

    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_dsm(nvme_cid(req), nvme_nsid(ns), nr, attr);

    if (attr & NVME_DSMGMT_AD) {
        int64_t offset;
        size_t len;
        NvmeDsmRange range[nr];
        uintptr_t *discards = (uintptr_t *)&req->opaque;

        status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
        if (status) {
            return status;
        }

        /*
         * AIO callbacks may be called immediately, so initialize discards to 1
         * to make sure the callback does not complete the request before all
         * discards have been issued.
         */
        *discards = 1;

        for (int i = 0; i < nr; i++) {
            uint64_t slba = le64_to_cpu(range[i].slba);
            uint32_t nlb = le32_to_cpu(range[i].nlb);

            if (nvme_check_bounds(ns, slba, nlb)) {
                trace_pci_nvme_err_invalid_lba_range(slba, nlb,
                                                     ns->id_ns.nsze);
                continue;
            }

            trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba,
                                          nlb);

            if (nlb > n->dmrsl) {
                trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
            }

            offset = nvme_l2b(ns, slba);
            len = nvme_l2b(ns, nlb);

            while (len) {
                size_t bytes = MIN(BDRV_REQUEST_MAX_BYTES, len);

                (*discards)++;

                blk_aio_pdiscard(ns->blkconf.blk, offset, bytes,
                                 nvme_aio_discard_cb, req);

                offset += bytes;
                len -= bytes;
            }
        }

        /* account for the 1-initialization */
        (*discards)--;

        if (*discards) {
            status = NVME_NO_COMPLETE;
        } else {
            status = req->status;
        }
    }

    return status;
}
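/*
 * The "1-initialization" used above is the pattern this device model uses
 * whenever it fans out an unknown number of AIOs for a single request. A
 * minimal sketch of the idea (hypothetical names, not code from this file):
 *
 *     uintptr_t *pending = (uintptr_t *)&req->opaque;
 *
 *     *pending = 1;                      // bias; holds the request open
 *     for (each chunk) {
 *         (*pending)++;
 *         submit_aio(chunk, cb, req);    // cb decrements and completes the
 *     }                                  // request when *pending hits zero
 *     (*pending)--;                      // drop the bias
 *
 * Without the bias, a callback that runs synchronously from submit could see
 * a zero count and complete the request while later chunks are still being
 * issued.
 */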
static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    size_t len = nvme_l2b(ns, nlb);
    int64_t offset = nvme_l2b(ns, slba);
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint32_t reftag = le32_to_cpu(rw->reftag);
    NvmeBounceContext *ctx = NULL;
    uint16_t status;

    trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        status = nvme_check_prinfo(ns, ctrl, slba, reftag);
        if (status) {
            return status;
        }

        if (ctrl & NVME_RW_PRINFO_PRACT) {
            return NVME_INVALID_PROT_INFO | NVME_DNR;
        }
    }

    if (len > n->page_size << n->params.vsl) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    status = nvme_check_bounds(ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);
        if (status) {
            return status;
        }
    }

    ctx = g_new0(NvmeBounceContext, 1);
    ctx->req = req;

    ctx->data.bounce = g_malloc(len);

    qemu_iovec_init(&ctx->data.iov, 1);
    qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len);

    block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
                     BLOCK_ACCT_READ);

    req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0,
                                nvme_verify_mdata_in_cb, ctx);
    return NVME_NO_COMPLETE;
}
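/*
 * For reference, the vsl bound checked above mirrors mdts: the limit is
 * page_size * 2^vsl. With illustrative numbers, a 4 KiB controller page size
 * and vsl = 7 cap a single Verify at 4 KiB << 7 = 512 KiB of data.
 */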
static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = req->ns;
    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;

    uint16_t nr = copy->nr + 1;
    uint8_t format = copy->control[0] & 0xf;

    /*
     * Shift the PRINFOR/PRINFOW values by 10 to allow reusing the
     * NVME_RW_PRINFO constants.
     */
    uint16_t prinfor = ((copy->control[0] >> 4) & 0xf) << 10;
    uint16_t prinfow = ((copy->control[2] >> 2) & 0xf) << 10;

    uint32_t nlb = 0;
    uint8_t *bounce = NULL, *bouncep = NULL;
    uint8_t *mbounce = NULL, *mbouncep = NULL;
    struct nvme_copy_ctx *ctx;
    uint16_t status;
    int i;

    trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
        ((prinfor & NVME_RW_PRINFO_PRACT) != (prinfow & NVME_RW_PRINFO_PRACT))) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!(n->id_ctrl.ocfs & (1 << format))) {
        trace_pci_nvme_err_copy_invalid_format(format);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nr > ns->id_ns.msrc + 1) {
        return NVME_CMD_SIZE_LIMIT | NVME_DNR;
    }

    ctx = g_new(struct nvme_copy_ctx, 1);
    ctx->ranges = g_new(NvmeCopySourceRange, nr);

    status = nvme_h2c(n, (uint8_t *)ctx->ranges,
                      nr * sizeof(NvmeCopySourceRange), req);
    if (status) {
        goto out;
    }

    for (i = 0; i < nr; i++) {
        uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
        uint32_t _nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;

        if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
            status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
            goto out;
        }

        status = nvme_check_bounds(ns, slba, _nlb);
        if (status) {
            trace_pci_nvme_err_invalid_lba_range(slba, _nlb, ns->id_ns.nsze);
            goto out;
        }

        if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
            status = nvme_check_dulbe(ns, slba, _nlb);
            if (status) {
                goto out;
            }
        }

        if (ns->params.zoned) {
            status = nvme_check_zone_read(ns, slba, _nlb);
            if (status) {
                goto out;
            }
        }

        nlb += _nlb;
    }

    if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
        goto out;
    }

    bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
    if (nvme_msize(ns)) {
        mbounce = mbouncep = g_malloc(nvme_m2b(ns, nlb));
    }

    block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_READ);

    ctx->bounce = bounce;
    ctx->mbounce = mbounce;
    ctx->nlb = nlb;
    ctx->copies = 1;

    req->opaque = ctx;

    for (i = 0; i < nr; i++) {
        uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
        uint32_t nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;

        size_t len = nvme_l2b(ns, nlb);
        int64_t offset = nvme_l2b(ns, slba);

        trace_pci_nvme_copy_source_range(slba, nlb);

        struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1);
        in_ctx->req = req;

        qemu_iovec_init(&in_ctx->iov, 1);
        qemu_iovec_add(&in_ctx->iov, bouncep, len);

        ctx->copies++;

        blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
                       nvme_aio_copy_in_cb, in_ctx);

        bouncep += len;

        if (nvme_msize(ns)) {
            len = nvme_m2b(ns, nlb);
            offset = ns->mdata_offset + nvme_m2b(ns, slba);

            in_ctx = g_new(struct nvme_copy_in_ctx, 1);
            in_ctx->req = req;

            qemu_iovec_init(&in_ctx->iov, 1);
            qemu_iovec_add(&in_ctx->iov, mbouncep, len);

            ctx->copies++;

            blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
                           nvme_aio_copy_in_cb, in_ctx);

            mbouncep += len;
        }
    }

    /* account for the 1-initialization */
    ctx->copies--;

    if (!ctx->copies) {
        nvme_copy_in_complete(req);
    }

    return NVME_NO_COMPLETE;

out:
    g_free(ctx->ranges);
    g_free(ctx);

    return status;
}
static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint16_t ctrl = le16_to_cpu(rw->control);
    size_t data_len = nvme_l2b(ns, nlb);
    size_t len = data_len;
    int64_t offset = nvme_l2b(ns, slba);
    struct nvme_compare_ctx *ctx = NULL;
    uint16_t status;

    trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (ctrl & NVME_RW_PRINFO_PRACT)) {
        return NVME_INVALID_PROT_INFO | NVME_DNR;
    }

    if (nvme_ns_ext(ns)) {
        len += nvme_m2b(ns, nlb);
    }

    status = nvme_check_mdts(n, len);
    if (status) {
        return status;
    }

    status = nvme_check_bounds(ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);
        if (status) {
            return status;
        }
    }

    status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
    if (status) {
        return status;
    }

    ctx = g_new(struct nvme_compare_ctx, 1);
    ctx->data.bounce = g_malloc(data_len);

    req->opaque = ctx;

    qemu_iovec_init(&ctx->data.iov, 1);
    qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len);

    block_acct_start(blk_get_stats(blk), &req->acct, data_len,
                     BLOCK_ACCT_READ);
    blk_aio_preadv(blk, offset, &ctx->data.iov, 0, nvme_compare_data_cb, req);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
    NvmeNamespace *ns;
    struct nvme_aio_flush_ctx *ctx;
    uint16_t status;

    trace_pci_nvme_flush(nvme_cid(req), nsid);

    if (nsid != NVME_NSID_BROADCAST) {
        req->ns = nvme_ns(n, nsid);
        if (unlikely(!req->ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
                         BLOCK_ACCT_FLUSH);
        req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_misc_cb, req);
        return NVME_NO_COMPLETE;
    }

    /* 1-initialize; see comment in nvme_dsm */
    *num_flushes = 1;

    for (int i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        ctx = g_new(struct nvme_aio_flush_ctx, 1);
        ctx->req = req;
        ctx->ns = ns;

        (*num_flushes)++;

        block_acct_start(blk_get_stats(ns->blkconf.blk), &ctx->acct, 0,
                         BLOCK_ACCT_FLUSH);
        blk_aio_flush(ns->blkconf.blk, nvme_aio_flush_cb, ctx);
    }

    /* account for the 1-initialization */
    (*num_flushes)--;

    if (*num_flushes) {
        status = NVME_NO_COMPLETE;
    } else {
        status = req->status;
    }

    return status;
}
static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t mapped_size = data_size;
    uint64_t data_offset;
    BlockBackend *blk = ns->blkconf.blk;
    uint16_t status;

    if (nvme_ns_ext(ns)) {
        mapped_size += nvme_m2b(ns, nlb);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            bool pract = ctrl & NVME_RW_PRINFO_PRACT;

            if (pract && nvme_msize(ns) == 8) {
                mapped_size = data_size;
            }
        }
    }

    trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba);

    status = nvme_check_mdts(n, mapped_size);
    if (status) {
        goto invalid;
    }

    status = nvme_check_bounds(ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        goto invalid;
    }

    if (ns->params.zoned) {
        status = nvme_check_zone_read(ns, slba, nlb);
        if (status) {
            trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
            goto invalid;
        }
    }

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);
        if (status) {
            goto invalid;
        }
    }

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        return nvme_dif_rw(n, req);
    }

    status = nvme_map_data(n, nlb, req);
    if (status) {
        goto invalid;
    }

    data_offset = nvme_l2b(ns, slba);

    block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                     BLOCK_ACCT_READ);
    nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;

invalid:
    block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
    return status | NVME_DNR;
}
static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
                              bool wrz)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t mapped_size = data_size;
    uint64_t data_offset;
    NvmeZone *zone;
    NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
    BlockBackend *blk = ns->blkconf.blk;
    uint16_t status;

    if (nvme_ns_ext(ns)) {
        mapped_size += nvme_m2b(ns, nlb);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            bool pract = ctrl & NVME_RW_PRINFO_PRACT;

            if (pract && nvme_msize(ns) == 8) {
                mapped_size -= nvme_m2b(ns, nlb);
            }
        }
    }

    trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
                         nvme_nsid(ns), nlb, mapped_size, slba);

    if (!wrz) {
        status = nvme_check_mdts(n, mapped_size);
        if (status) {
            goto invalid;
        }
    }

    status = nvme_check_bounds(ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        goto invalid;
    }

    if (ns->params.zoned) {
        zone = nvme_get_zone_by_slba(ns, slba);

        if (append) {
            bool piremap = !!(ctrl & NVME_RW_PIREMAP);

            if (unlikely(slba != zone->d.zslba)) {
                trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba);
                status = NVME_INVALID_FIELD;
                goto invalid;
            }

            if (n->params.zasl &&
                data_size > (uint64_t)n->page_size << n->params.zasl) {
                trace_pci_nvme_err_zasl(data_size);
                return NVME_INVALID_FIELD | NVME_DNR;
            }

            slba = zone->w_ptr;
            rw->slba = cpu_to_le64(slba);
            res->slba = cpu_to_le64(slba);

            switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            case NVME_ID_NS_DPS_TYPE_1:
                if (!piremap) {
                    return NVME_INVALID_PROT_INFO | NVME_DNR;
                }

                /* fallthrough */

            case NVME_ID_NS_DPS_TYPE_2:
                if (piremap) {
                    uint32_t reftag = le32_to_cpu(rw->reftag);
                    rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba));
                }

                break;

            case NVME_ID_NS_DPS_TYPE_3:
                if (piremap) {
                    return NVME_INVALID_PROT_INFO | NVME_DNR;
                }

                break;
            }
        }

        status = nvme_check_zone_write(ns, zone, slba, nlb);
        if (status) {
            goto invalid;
        }

        status = nvme_zrm_auto(ns, zone);
        if (status) {
            goto invalid;
        }

        zone->w_ptr += nlb;
    }

    data_offset = nvme_l2b(ns, slba);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        return nvme_dif_rw(n, req);
    }

    if (!wrz) {
        status = nvme_map_data(n, nlb, req);
        if (status) {
            goto invalid;
        }

        block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                         BLOCK_ACCT_WRITE);
        nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
    } else {
        req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
                                           BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
                                           req);
    }

    return NVME_NO_COMPLETE;

invalid:
    block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
    return status | NVME_DNR;
}
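/*
 * Worked example for the Zone Append remap above (illustrative numbers): on
 * a Type 1 or Type 2 protected namespace with PIREMAP set, appending to a
 * zone with zslba = 0x1000 whose write pointer sits at 0x1010 rewrites the
 * command's slba to 0x1010 and advances the initial reference tag by 0x10,
 * so the stored protection tags still match the LBAs actually written.
 */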
static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, false, false);
}

static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, false, true);
}

static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req)
{
    return nvme_do_write(n, req, true, false);
}
static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
                                            uint64_t *slba, uint32_t *zone_idx)
{
    uint32_t dw10 = le32_to_cpu(c->cdw10);
    uint32_t dw11 = le32_to_cpu(c->cdw11);

    if (!ns->params.zoned) {
        trace_pci_nvme_err_invalid_opc(c->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    *slba = ((uint64_t)dw11) << 32 | dw10;
    if (unlikely(*slba >= ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze);
        *slba = 0;
        return NVME_LBA_RANGE | NVME_DNR;
    }

    *zone_idx = nvme_zone_idx(ns, *slba);
    assert(*zone_idx < ns->num_zones);

    return NVME_SUCCESS;
}
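/*
 * For reference, zone management commands carry the SLBA split across CDW10
 * (low 32 bits) and CDW11 (high 32 bits), so e.g. dw10 = 0x1000 with
 * dw11 = 0 addresses LBA 0x1000, as assembled above.
 */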
typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,
                                 NvmeRequest *);

enum NvmeZoneProcessingMask {
    NVME_PROC_CURRENT_ZONE    = 0,
    NVME_PROC_OPENED_ZONES    = 1 << 0,
    NVME_PROC_CLOSED_ZONES    = 1 << 1,
    NVME_PROC_READ_ONLY_ZONES = 1 << 2,
    NVME_PROC_FULL_ZONES      = 1 << 3,
};
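/*
 * How the masks combine for "all zones" operations (see nvme_zone_mgmt_send
 * below): Finish with the ALL bit set uses OPENED | CLOSED zones, while
 * Reset additionally includes FULL zones, i.e. the states from which the
 * respective transition is meaningful.
 */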
static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
                               NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_open(ns, zone);
}

static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_close(ns, zone);
}

static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
                                 NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_finish(ns, zone);
}
static uint16_t nvme_reset_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
{
    uintptr_t *resets = (uintptr_t *)&req->opaque;
    struct nvme_zone_reset_ctx *ctx;

    switch (state) {
    case NVME_ZONE_STATE_EMPTY:
        return NVME_SUCCESS;
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
    case NVME_ZONE_STATE_CLOSED:
    case NVME_ZONE_STATE_FULL:
        break;
    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }

    /*
     * The zone reset aio callback needs to know the zone that is being reset
     * in order to transition the zone on completion.
     */
    ctx = g_new(struct nvme_zone_reset_ctx, 1);
    ctx->req = req;
    ctx->zone = zone;

    (*resets)++;

    blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_l2b(ns, zone->d.zslba),
                          nvme_l2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP,
                          nvme_aio_zone_reset_cb, ctx);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
                                  NvmeZoneState state, NvmeRequest *req)
{
    switch (state) {
    case NVME_ZONE_STATE_READ_ONLY:
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
        /* fall through */
    case NVME_ZONE_STATE_OFFLINE:
        return NVME_SUCCESS;
    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}
static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone)
{
    uint16_t status;
    uint8_t state = nvme_get_zone_state(zone);

    if (state == NVME_ZONE_STATE_EMPTY) {
        status = nvme_aor_check(ns, 1, 0);
        if (status) {
            return status;
        }
        nvme_aor_inc_active(ns);
        zone->d.za |= NVME_ZA_ZD_EXT_VALID;
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
        return NVME_SUCCESS;
    }

    return NVME_ZONE_INVAL_TRANSITION;
}
static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone,
                                    enum NvmeZoneProcessingMask proc_mask,
                                    op_handler_t op_hndlr, NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;
    NvmeZoneState zs = nvme_get_zone_state(zone);
    bool proc_zone;

    switch (zs) {
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        proc_zone = proc_mask & NVME_PROC_OPENED_ZONES;
        break;
    case NVME_ZONE_STATE_CLOSED:
        proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES;
        break;
    case NVME_ZONE_STATE_READ_ONLY:
        proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES;
        break;
    case NVME_ZONE_STATE_FULL:
        proc_zone = proc_mask & NVME_PROC_FULL_ZONES;
        break;
    default:
        proc_zone = false;
    }

    if (proc_zone) {
        status = op_hndlr(ns, zone, zs, req);
    }

    return status;
}
static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
                                enum NvmeZoneProcessingMask proc_mask,
                                op_handler_t op_hndlr, NvmeRequest *req)
{
    NvmeZone *next;
    uint16_t status = NVME_SUCCESS;
    int i;

    if (!proc_mask) {
        status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req);
    } else {
        if (proc_mask & NVME_PROC_CLOSED_ZONES) {
            QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
                status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
                                             req);
                if (status && status != NVME_NO_COMPLETE) {
                    goto out;
                }
            }
        }
        if (proc_mask & NVME_PROC_OPENED_ZONES) {
            QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
                status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
                                             req);
                if (status && status != NVME_NO_COMPLETE) {
                    goto out;
                }
            }

            QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
                status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
                                             req);
                if (status && status != NVME_NO_COMPLETE) {
                    goto out;
                }
            }
        }
        if (proc_mask & NVME_PROC_FULL_ZONES) {
            QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) {
                status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
                                             req);
                if (status && status != NVME_NO_COMPLETE) {
                    goto out;
                }
            }
        }

        if (proc_mask & NVME_PROC_READ_ONLY_ZONES) {
            for (i = 0; i < ns->num_zones; i++, zone++) {
                status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
                                             req);
                if (status && status != NVME_NO_COMPLETE) {
                    goto out;
                }
            }
        }
    }

out:
    return status;
}
static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    NvmeZone *zone;
    uintptr_t *resets;
    uint8_t *zd_ext;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint64_t slba = 0;
    uint32_t zone_idx = 0;
    uint16_t status;
    uint8_t action;
    bool all;
    enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE;

    action = dw13 & 0xff;
    all = dw13 & 0x100;

    req->status = NVME_SUCCESS;

    if (!all) {
        status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
        if (status) {
            return status;
        }
    }

    zone = &ns->zone_array[zone_idx];
    if (slba != zone->d.zslba) {
        trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (action) {

    case NVME_ZONE_ACTION_OPEN:
        if (all) {
            proc_mask = NVME_PROC_CLOSED_ZONES;
        }
        trace_pci_nvme_open_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req);
        break;

    case NVME_ZONE_ACTION_CLOSE:
        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES;
        }
        trace_pci_nvme_close_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req);
        break;

    case NVME_ZONE_ACTION_FINISH:
        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES;
        }
        trace_pci_nvme_finish_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req);
        break;

    case NVME_ZONE_ACTION_RESET:
        resets = (uintptr_t *)&req->opaque;

        if (all) {
            proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES |
                NVME_PROC_FULL_ZONES;
        }
        trace_pci_nvme_reset_zone(slba, zone_idx, all);

        *resets = 1;

        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_reset_zone, req);

        (*resets)--;

        return *resets ? NVME_NO_COMPLETE : req->status;

    case NVME_ZONE_ACTION_OFFLINE:
        if (all) {
            proc_mask = NVME_PROC_READ_ONLY_ZONES;
        }
        trace_pci_nvme_offline_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req);
        break;

    case NVME_ZONE_ACTION_SET_ZD_EXT:
        trace_pci_nvme_set_descriptor_extension(slba, zone_idx);
        if (all || !ns->params.zd_extension_size) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
        zd_ext = nvme_get_zd_extension(ns, zone_idx);
        status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
        if (status) {
            trace_pci_nvme_err_zd_extension_map_error(zone_idx);
            return status;
        }

        status = nvme_set_zd_ext(ns, zone);
        if (status == NVME_SUCCESS) {
            trace_pci_nvme_zd_extension_set(zone_idx);
            return status;
        }
        break;

    default:
        trace_pci_nvme_err_invalid_mgmt_action(action);
        status = NVME_INVALID_FIELD;
    }

    if (status == NVME_ZONE_INVAL_TRANSITION) {
        trace_pci_nvme_err_invalid_zone_state_transition(action, slba,
                                                         zone->d.za);
    }
    if (status) {
        status |= NVME_DNR;
    }

    return status;
}
static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
{
    NvmeZoneState zs = nvme_get_zone_state(zl);

    switch (zafs) {
    case NVME_ZONE_REPORT_ALL:
        return true;
    case NVME_ZONE_REPORT_EMPTY:
        return zs == NVME_ZONE_STATE_EMPTY;
    case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
    case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
    case NVME_ZONE_REPORT_CLOSED:
        return zs == NVME_ZONE_STATE_CLOSED;
    case NVME_ZONE_REPORT_FULL:
        return zs == NVME_ZONE_STATE_FULL;
    case NVME_ZONE_REPORT_READ_ONLY:
        return zs == NVME_ZONE_STATE_READ_ONLY;
    case NVME_ZONE_REPORT_OFFLINE:
        return zs == NVME_ZONE_STATE_OFFLINE;
    default:
        return false;
    }
}
static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    /* cdw12 is zero-based number of dwords to return. Convert to bytes */
    uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint32_t zone_idx, zra, zrasf, partial;
    uint64_t max_zones, nr_zones = 0;
    uint16_t status;
    uint64_t slba;
    NvmeZoneDescr *z;
    NvmeZone *zone;
    NvmeZoneReportHeader *header;
    void *buf, *buf_p;
    size_t zone_entry_sz;
    int i;

    req->status = NVME_SUCCESS;

    status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
    if (status) {
        return status;
    }

    zra = dw13 & 0xff;
    if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    zrasf = (dw13 >> 8) & 0xff;
    if (zrasf > NVME_ZONE_REPORT_OFFLINE) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (data_size < sizeof(NvmeZoneReportHeader)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    status = nvme_check_mdts(n, data_size);
    if (status) {
        return status;
    }

    partial = (dw13 >> 16) & 0x01;

    zone_entry_sz = sizeof(NvmeZoneDescr);
    if (zra == NVME_ZONE_REPORT_EXTENDED) {
        zone_entry_sz += ns->params.zd_extension_size;
    }

    max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz;
    buf = g_malloc0(data_size);

    zone = &ns->zone_array[zone_idx];
    for (i = zone_idx; i < ns->num_zones; i++) {
        if (partial && nr_zones >= max_zones) {
            break;
        }
        if (nvme_zone_matches_filter(zrasf, zone++)) {
            nr_zones++;
        }
    }
    header = (NvmeZoneReportHeader *)buf;
    header->nr_zones = cpu_to_le64(nr_zones);

    buf_p = buf + sizeof(NvmeZoneReportHeader);
    for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
        zone = &ns->zone_array[zone_idx];
        if (nvme_zone_matches_filter(zrasf, zone)) {
            z = (NvmeZoneDescr *)buf_p;
            buf_p += sizeof(NvmeZoneDescr);

            z->zt = zone->d.zt;
            z->zs = zone->d.zs;
            z->zcap = cpu_to_le64(zone->d.zcap);
            z->zslba = cpu_to_le64(zone->d.zslba);
            z->za = zone->d.za;

            if (nvme_wp_is_valid(zone)) {
                z->wp = cpu_to_le64(zone->d.wp);
            } else {
                z->wp = cpu_to_le64(~0ULL);
            }

            if (zra == NVME_ZONE_REPORT_EXTENDED) {
                if (zone->d.za & NVME_ZA_ZD_EXT_VALID) {
                    memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx),
                           ns->params.zd_extension_size);
                }
                buf_p += ns->params.zd_extension_size;
            }

            max_zones--;
        }
    }

    status = nvme_c2h(n, (uint8_t *)buf, data_size, req);

    g_free(buf);

    return status;
}
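/*
 * Report sizing example (illustrative): CDW12 holds a 0's based dword count,
 * so cdw12 = 0x3ff yields data_size = (0x3ff + 1) << 2 = 4096 bytes. With
 * 64-byte zone descriptors and no descriptor extensions that leaves room for
 * (4096 - 64) / 64 = 63 entries after the 64-byte report header.
 */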
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint16_t status;

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    /*
     * In the base NVM command set, Flush may apply to all namespaces
     * (indicated by NSID being set to 0xFFFFFFFF). But if that feature is used
     * along with TP 4056 (Namespace Types), it may be pretty screwed up.
     *
     * If NSID is indeed set to 0xFFFFFFFF, we simply cannot associate the
     * opcode with a specific command since we cannot determine a unique I/O
     * command set. Opcode 0x0 could mean something other than flushing; say
     * it DOES have completely different semantics in some other command set.
     * Does an NSID of 0xFFFFFFFF then mean "for all namespaces, apply
     * whatever command set specific command uses the 0x0 opcode"? Or does it
     * mean "for all namespaces, apply whatever command uses the 0x0 opcode
     * if, and only if, it allows NSID to be 0xFFFFFFFF"?
     *
     * Anyway (and luckily), for now, we do not care about this since the
     * device only supports namespace types that include the NVM Flush command
     * (NVM and Zoned), so always do an NVM Flush.
     */
    if (req->cmd.opcode == NVME_CMD_FLUSH) {
        return nvme_flush(n, req);
    }

    req->ns = nvme_ns(n, nsid);
    if (unlikely(!req->ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!(req->ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    status = nvme_ns_status(req->ns);
    if (unlikely(status)) {
        return status;
    }

    switch (req->cmd.opcode) {
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_ZONE_APPEND:
        return nvme_zone_append(n, req);
    case NVME_CMD_WRITE:
        return nvme_write(n, req);
    case NVME_CMD_READ:
        return nvme_read(n, req);
    case NVME_CMD_COMPARE:
        return nvme_compare(n, req);
    case NVME_CMD_DSM:
        return nvme_dsm(n, req);
    case NVME_CMD_VERIFY:
        return nvme_verify(n, req);
    case NVME_CMD_COPY:
        return nvme_copy(n, req);
    case NVME_CMD_ZONE_MGMT_SEND:
        return nvme_zone_mgmt_send(n, req);
    case NVME_CMD_ZONE_MGMT_RECV:
        return nvme_zone_mgmt_recv(n, req);
    default:
        break;
    }

    return NVME_INVALID_OPCODE | NVME_DNR;
}
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        assert(r->aiocb);
        blk_aio_cancel(r->aiocb);
    }

    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
                 n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
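/*
 * Note that qsize is a 0's based value (as is MQES in CAP), which is why the
 * queue is created with qsize + 1 entries above; e.g. qsize = 63 requests a
 * 64-entry submission queue.
 */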
struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}
static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };
    uint32_t trans_len;
    NvmeNamespace *ns;
    time_t current_ms;

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
        nvme_set_blk_stats(ns, &stats);
    } else {
        int i;

        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }
            nvme_set_blk_stats(ns, &stats);
        }
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);
    smart.critical_warning = n->smart_critical_warning;

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
                                                        1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
}
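/*
 * Per the SMART / Health log definition, Data Units Read/Written are
 * reported in units of 1000 512-byte sectors (rounded up), which is what the
 * DIV_ROUND_UP(..., 1000) above implements on top of the per-namespace
 * sector counts accumulated by nvme_set_blk_stats().
 */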
static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
}
static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
}
static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                    uint64_t off, NvmeRequest *req)
{
    uint32_t nslist[1024];
    uint32_t trans_len;
    int i = 0;
    uint32_t nsid;

    memset(nslist, 0x0, sizeof(nslist));
    trans_len = MIN(sizeof(nslist) - off, buf_len);

    while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
            NVME_CHANGED_NSID_SIZE) {
        /*
         * If more than 1024 namespaces have changed, the first entry in the
         * log page is set to 0xffffffff and all others to 0, per the spec.
         */
        if (i == ARRAY_SIZE(nslist)) {
            memset(nslist, 0x0, sizeof(nslist));
            nslist[0] = 0xffffffff;
            break;
        }

        nslist[i++] = nsid;
        clear_bit(nsid, n->changed_nsids);
    }

    /*
     * Clear all remaining changed-namespace bits in case we bailed out early
     * because more than 1024 namespaces changed.
     */
    if (nslist[0] == 0xffffffff) {
        bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE);
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_NOTICE);
    }

    return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req);
}
static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
                                 uint64_t off, NvmeRequest *req)
{
    NvmeEffectsLog log = {};
    const uint32_t *src_iocs = NULL;
    uint32_t trans_len;

    if (off >= sizeof(log)) {
        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (NVME_CC_CSS(n->bar.cc)) {
    case NVME_CC_CSS_NVM:
        src_iocs = nvme_cse_iocs_nvm;
        /* fall through */
    case NVME_CC_CSS_ADMIN_ONLY:
        break;
    case NVME_CC_CSS_CSI:
        switch (csi) {
        case NVME_CSI_NVM:
            src_iocs = nvme_cse_iocs_nvm;
            break;
        case NVME_CSI_ZONED:
            src_iocs = nvme_cse_iocs_zoned;
            break;
        }
    }

    memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));

    if (src_iocs) {
        memcpy(log.iocs, src_iocs, sizeof(log.iocs));
    }

    trans_len = MIN(sizeof(log) - off, buf_len);

    return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
}
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint8_t  csi = le32_to_cpu(cmd->cdw14) >> 24;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    case NVME_LOG_CHANGED_NSLIST:
        return nvme_changed_nslist(n, rae, len, off, req);
    case NVME_LOG_CMD_EFFECTS:
        return nvme_cmd_effects(n, csi, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_free(cq->timer);
    if (msix_enabled(&n->parent_obj)) {
        msix_vector_unuse(&n->parent_obj, cq->vector);
    }
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    if (msix_enabled(&n->parent_obj)) {
        ret = msix_vector_use(&n->parent_obj, vector);
        assert(ret == 0);
    }
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
                 n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}
static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_c2h(n, id, sizeof(id), req);
}

static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
{
    switch (ns->csi) {
    case NVME_CSI_NVM:
    case NVME_CSI_ZONED:
        return true;
    }
    return false;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
}
static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
    NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;

    trace_pci_nvme_identify_ctrl_csi(c->csi);

    switch (c->csi) {
    case NVME_CSI_NVM:
        id_nvm->vsl = n->params.vsl;
        id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
        break;

    case NVME_CSI_ZONED:
        ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
        break;

    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return nvme_c2h(n, id, sizeof(id), req);
}
static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        if (!active) {
            ns = nvme_subsys_ns(n->subsys, nsid);
            if (!ns) {
                return nvme_rpt_empty_id_struct(n, req);
            }
        } else {
            return nvme_rpt_empty_id_struct(n, req);
        }
    }

    if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
        return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
    }

    return NVME_INVALID_CMD_SET | NVME_DNR;
}
static uint16_t nvme_identify_ns_attached_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint16_t min_id = le16_to_cpu(c->ctrlid);
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint16_t *ids = &list[1];
    NvmeNamespace *ns;
    NvmeCtrl *ctrl;
    int cntlid, nr_ids = 0;

    trace_pci_nvme_identify_ns_attached_list(min_id);

    if (c->nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ns = nvme_subsys_ns(n->subsys, c->nsid);
    if (!ns) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
        ctrl = nvme_subsys_ctrl(n->subsys, cntlid);
        if (!ctrl) {
            continue;
        }

        if (!nvme_ns_is_attached(ctrl, ns)) {
            continue;
        }

        ids[nr_ids++] = cntlid;
    }

    list[0] = nr_ids;

    return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);
}
static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
                                     bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns_csi(nsid, c->csi);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        if (!active) {
            ns = nvme_subsys_ns(n->subsys, nsid);
            if (!ns) {
                return nvme_rpt_empty_id_struct(n, req);
            }
        } else {
            return nvme_rpt_empty_id_struct(n, req);
        }
    }

    if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
        return nvme_rpt_empty_id_struct(n, req);
    } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
        return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
                        req);
    }

    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
                                     bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            if (!active) {
                ns = nvme_subsys_ns(n->subsys, i);
                if (!ns) {
                    continue;
                }
            } else {
                continue;
            }
        }
        if (ns->params.nsid <= min_nsid) {
            continue;
        }
        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
                                         bool active)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;
    int i, j = 0;

    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);

    /*
     * Same as in nvme_identify_nslist(), 0xffffffff/0xfffffffe are invalid.
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            if (!active) {
                ns = nvme_subsys_ns(n->subsys, i);
                if (!ns) {
                    continue;
                }
            } else {
                continue;
            }
        }
        if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
            continue;
        }
        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};

    struct data {
        struct {
            NvmeIdNsDescr hdr;
            uint8_t v[NVME_NIDL_UUID];
        } uuid;
        struct {
            NvmeIdNsDescr hdr;
            uint8_t v;
        } csi;
    };

    struct data *ns_descrs = (struct data *)list;

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    /*
     * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
     * structure, a Namespace UUID (nidt = 0x3) must be reported in the
     * Namespace Identification Descriptor. Add the namespace UUID here.
     */
    ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
    ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID;
    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);

    ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI;
    ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
    ns_descrs->csi.v = ns->csi;

    return nvme_c2h(n, list, sizeof(list), req);
}
static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
{
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);

    trace_pci_nvme_identify_cmd_set();

    NVME_SET_CSI(*list, NVME_CSI_NVM);
    NVME_SET_CSI(*list, NVME_CSI_ZONED);

    return nvme_c2h(n, list, data_len, req);
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),
                            c->csi);

    switch (c->cns) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req, true);
    case NVME_ID_CNS_NS_PRESENT:
        return nvme_identify_ns(n, req, false);
    case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
        return nvme_identify_ns_attached_list(n, req);
    case NVME_ID_CNS_CS_NS:
        return nvme_identify_ns_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT:
        return nvme_identify_ns_csi(n, req, false);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_CS_CTRL:
        return nvme_identify_ctrl_csi(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req, true);
    case NVME_ID_CNS_NS_PRESENT_LIST:
        return nvme_identify_nslist(n, req, false);
    case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
        return nvme_identify_nslist_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
        return nvme_identify_nslist_csi(n, req, false);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    case NVME_ID_CNS_IO_COMMAND_SET:
        return nvme_identify_cmd_set(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;
    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}
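/*
 * Layout note for the union above (following the Timestamp feature
 * definition): bits 47:0 carry milliseconds, bit 48 is the synch flag and
 * bits 51:49 the origin field; origin = 001b indicates the value was
 * established by a Set Features command, 000b that it counts from a
 * power-on or reset baseline.
 */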
static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;
    NvmeNamespace *ns;
    int i;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_ERROR_RECOVERY:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = ns->features.err_rec;
        goto out;
    case NVME_VOLATILE_WRITE_CACHE:
        result = 0;
        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            result = blk_enable_write_cache(ns->blkconf.blk);
            if (result) {
                break;
            }
        }
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }
        break;
    case NVME_COMMAND_SET_PROFILE:
        result = 0;
        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
    if (ret) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = NULL;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);
    int i;

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if ((n->temperature >= n->features.temp_thresh_hi) ||
            (n->temperature <= n->features.temp_thresh_low)) {
            nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH);
        }

        break;
    case NVME_ERROR_RECOVERY:
        if (nsid == NVME_NSID_BROADCAST) {
            for (i = 1; i <= n->num_namespaces; i++) {
                ns = nvme_ns(n, i);

                if (!ns) {
                    continue;
                }

                if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
                    ns->features.err_rec = dw11;
                }
            }

            break;
        }

        assert(ns);
        if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
            ns->features.err_rec = dw11;
        }
        break;
    case NVME_VOLATILE_WRITE_CACHE:
        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;

    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    case NVME_COMMAND_SET_PROFILE:
        if (dw11 & 0x1ff) {
            trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
            return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
        }
        break;
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
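/*
 * Number of Queues example (illustrative): NSQR and NCQR in CDW11 are 0's
 * based, so dw11 = 0x000f000f requests 16 I/O submission and 16 I/O
 * completion queues; the handler above always answers with
 * max_ioqpairs - 1 in both halves of the result, regardless of what was
 * requested.
 */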
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}
static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeCtrl *ctrl;
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    bool attach = !(dw10 & 0xf);
    uint16_t *nr_ids = &list[0];
    uint16_t *ids = &list[1];
    uint16_t ret;
    int i;

    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);

    ns = nvme_subsys_ns(n->subsys, nsid);
    if (!ns) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
    if (ret) {
        return ret;
    }

    if (!*nr_ids) {
        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
    }

    for (i = 0; i < *nr_ids; i++) {
        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
        if (!ctrl) {
            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
        }

        if (attach) {
            if (nvme_ns_is_attached(ctrl, ns)) {
                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
            }

            nvme_ns_attach(ctrl, ns);
            __nvme_select_ns_iocs(ctrl, ns);
        } else {
            if (!nvme_ns_is_attached(ctrl, ns)) {
                return NVME_NS_NOT_ATTACHED | NVME_DNR;
            }

            nvme_ns_detach(ctrl, ns);
        }

        /*
         * Add namespace id to the changed namespace id list for event clearing
         * via Get Log Page command.
         */
        if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
            nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
                               NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
                               NVME_LOG_CHANGED_NSLIST);
        }
    }

    return NVME_SUCCESS;
}
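/*
 * The 4096-byte buffer transferred from the host above is a Controller List
 * as defined by the spec: the first 16-bit word holds the number of entries
 * and the following words the controller identifiers, hence the nr_ids/ids
 * aliases into list[]. For example, a list attaching the namespace to
 * controllers 0 and 2 is the word sequence { 2, 0, 2 }.
 */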
static uint16_t nvme_format_ns(NvmeCtrl *n, NvmeNamespace *ns, uint8_t lbaf,
                               uint8_t mset, uint8_t pi, uint8_t pil,
                               NvmeRequest *req)
{
    int64_t len, offset;
    struct nvme_aio_format_ctx *ctx;
    BlockBackend *blk = ns->blkconf.blk;
    uint16_t ms;
    uintptr_t *num_formats = (uintptr_t *)&req->opaque;
    int *count;

    if (ns->params.zoned) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    trace_pci_nvme_format_ns(nvme_cid(req), nvme_nsid(ns), lbaf, mset, pi, pil);

    if (lbaf > ns->id_ns.nlbaf) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    ms = ns->id_ns.lbaf[lbaf].ms;

    if (pi && (ms < sizeof(NvmeDifTuple))) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    if (pi && pi > NVME_ID_NS_DPS_TYPE_3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    nvme_ns_drain(ns);
    nvme_ns_shutdown(ns);
    nvme_ns_cleanup(ns);

    ns->id_ns.dps = (pil << 3) | pi;
    ns->id_ns.flbas = lbaf | (mset << 4);

    nvme_ns_init_format(ns);

    ns->status = NVME_FORMAT_IN_PROGRESS;

    len = ns->size;
    offset = 0;

    count = g_new(int, 1);
    *count = 1;

    (*num_formats)++;

    while (len) {
        ctx = g_new(struct nvme_aio_format_ctx, 1);
        ctx->req = req;
        ctx->ns = ns;
        ctx->count = count;

        size_t bytes = MIN(BDRV_REQUEST_MAX_BYTES, len);

        (*count)++;

        blk_aio_pwrite_zeroes(blk, offset, bytes, BDRV_REQ_MAY_UNMAP,
                              nvme_aio_format_cb, ctx);

        offset += bytes;
        len -= bytes;
    }

    if (--(*count)) {
        return NVME_NO_COMPLETE;
    }

    g_free(count);
    ns->status = 0x0;
    (*num_formats)--;

    return NVME_NO_COMPLETE;
}
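/*
 * The format itself is implemented as a series of asynchronous write-zeroes
 * requests, each at most BDRV_REQUEST_MAX_BYTES long. The heap-allocated
 * count is 1-initialized so that the namespace only leaves
 * NVME_FORMAT_IN_PROGRESS once the final blk_aio_pwrite_zeroes callback
 * drops the last reference; the same 1-initialization trick, stored in
 * req->opaque, tracks how many namespaces a broadcast format still has in
 * flight (see nvme_format below).
 */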
static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint8_t lbaf = dw10 & 0xf;
    uint8_t mset = (dw10 >> 4) & 0x1;
    uint8_t pi = (dw10 >> 5) & 0x7;
    uint8_t pil = (dw10 >> 8) & 0x1;
    uintptr_t *num_formats = (uintptr_t *)&req->opaque;
    uint16_t status;
    int i;

    trace_pci_nvme_format(nvme_cid(req), nsid, lbaf, mset, pi, pil);

    /* 1-initialize; see the comment in nvme_dsm */
    *num_formats = 1;

    if (nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        status = nvme_format_ns(n, ns, lbaf, mset, pi, pil, req);
        if (status && status != NVME_NO_COMPLETE) {
            req->status = status;
        }
    } else {
        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            status = nvme_format_ns(n, ns, lbaf, mset, pi, pil, req);
            if (status && status != NVME_NO_COMPLETE) {
                req->status = status;
                break;
            }
        }
    }

    /* account for the 1-initialization */
    if (--(*num_formats)) {
        return NVME_NO_COMPLETE;
    }

    return req->status;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    /* SGLs shall not be used for Admin commands in NVMe over PCIe */
    if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    case NVME_ADM_CMD_NS_ATTACHMENT:
        return nvme_ns_attachment(n, req);
    case NVME_ADM_CMD_FORMAT_NVM:
        return nvme_format(n, req);
    default:
        assert(false);
    }

    return NVME_INVALID_OPCODE | NVME_DNR;
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
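/*
 * Worked example of the SQE fetch above: with CC.IOSQES = 6 each submission
 * entry is 2^6 = 64 bytes, so for a queue based at dma_addr = 0x10000000
 * with head = 3 the next command is read from 0x100000c0. A failed read is
 * fatal and is surfaced to the host by latching CSTS.CFS via
 * NVME_CSTS_FAILED.
 */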
static void nvme_ctrl_reset(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_drain(ns);
    }

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;

    n->bar.cc = 0;
}
static void nvme_ctrl_shutdown(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    if (n->pmr.dev) {
        memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
    }

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_shutdown(ns);
    }
}
static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns)
{
    ns->iocs = nvme_cse_iocs_none;
    switch (ns->csi) {
    case NVME_CSI_NVM:
        if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    case NVME_CSI_ZONED:
        if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
            ns->iocs = nvme_cse_iocs_zoned;
        } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    }
}

static void nvme_select_ns_iocs(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        __nvme_select_ns_iocs(n, ns);
    }
}
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
        trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    nvme_select_ns_iocs(n);

    return 0;
}
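/*
 * Example of the derived sizes above, assuming the usual CC programming:
 * CC.MPS = 0 gives page_bits = 12 (4 KiB pages) and max_prp_ents =
 * 4096 / 8 = 512 PRP entries per page; CC.IOSQES = 6 and CC.IOCQES = 4
 * give 64-byte submission and 16-byte completion entries, matching the
 * fixed values advertised in id->sqes and id->cqes.
 */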
static void nvme_cmb_enable_regs(NvmeCtrl *n)
{
    NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
}
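/*
 * SZU = 2 selects 1 MiB size units, so CMBSZ.SZ is simply the cmb_size_mb
 * device parameter: e.g. cmb_size_mb=64 advertises a 64 MiB CMB at offset 0
 * of the BAR selected by CMBLOC.BIR.
 */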
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_ctrl_reset(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }

        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_ctrl_shutdown(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = size == 8 ? data :
            (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff);
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = size == 8 ? data :
            (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff);
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0x50:  /* CMBMSC */
        if (!NVME_CAP_CMBS(n->bar.cap)) {
            return;
        }

        n->bar.cmbmsc = size == 8 ? data :
            (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
        n->cmb.cmse = false;

        if (NVME_CMBMSC_CRE(data)) {
            nvme_cmb_enable_regs(n);

            if (NVME_CMBMSC_CMSE(data)) {
                hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
                if (cba + int128_get64(n->cmb.mem.size) < cba) {
                    NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
                    return;
                }

                n->cmb.cba = cba;
                n->cmb.cmse = true;
            }
        } else {
            n->bar.cmbsz = 0;
            n->bar.cmbloc = 0;
        }

        return;
    case 0x54:  /* CMBMSC hi */
        n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
        return;

    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* PMRCTL */
        n->bar.pmrctl = data;

        if (NVME_PMRCTL_EN(data)) {
            memory_region_set_enabled(&n->pmr.dev->mr, true);
            n->bar.pmrsts = 0;
        } else {
            memory_region_set_enabled(&n->pmr.dev->mr, false);
            NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1);
            n->pmr.cmse = false;
        }

        return;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* PMRMSCL */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff);
        n->pmr.cmse = false;

        if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) {
            hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT;
            if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
                NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1);
                return;
            }

            n->pmr.cmse = true;
            n->pmr.cba = cba;
        }

        return;
    case 0xE18: /* PMRMSCU */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32);
        return;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr, size);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then a read from PMRSTS should ensure
         * that prior writes made it to persistent media.
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " cqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, cqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
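/*
 * Doorbell decoding, for reference: with CAP.DSTRD = 0 the stride is 4
 * bytes, so the tail doorbell of submission queue y sits at
 * 0x1000 + (2y * 4) and the head doorbell of completion queue y at
 * 0x1000 + ((2y + 1) * 4). That is why bit 2 of (addr - 0x1000)
 * distinguishes CQ from SQ writes above, and why qid is recovered by
 * shifting the queue-relative offset down by 3.
 */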
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data, size);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}
static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmb.buf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmb.buf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk) {
        warn_report("drive property is deprecated; "
                    "please use an nvme-ns device instead");
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (n->pmr.dev) {
        if (host_memory_backend_is_mapped(n->pmr.dev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmr.dev)));
            return;
        }

        if (!is_power_of_2(n->pmr.dev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmr.dev, true);
    }

    if (n->params.zasl > n->params.mdts) {
        error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
                   "than or equal to mdts (Maximum Data Transfer Size)");
        return;
    }

    if (!n->params.vsl) {
        error_setg(errp, "vsl must be non-zero");
        return;
    }
}
static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = NVME_MAX_NAMESPACES;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
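/*
 * For the default max_ioqpairs=64, the doorbell area amounts to
 * 2 * (64 + 1) * NVME_DB_SIZE = 520 bytes (one SQ tail and one CQ head
 * doorbell per queue pair, admin pair included) on top of the fixed
 * register file, and reg_size is rounded up to the next power of two
 * before BAR0 is laid out in nvme_init_pci.
 */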
static int nvme_attach_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    if (nvme_ns_is_attached(n, ns)) {
        error_setg(errp,
                   "namespace %d is already attached to controller %d",
                   nvme_nsid(ns), n->cntlid);
        return -1;
    }

    nvme_ns_attach(n, ns);

    return 0;
}
int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    uint32_t nsid = nvme_nsid(ns);

    if (nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (!nsid) {
        for (int i = 1; i <= n->num_namespaces; i++) {
            if (!nvme_ns(n, i)) {
                nsid = ns->params.nsid = i;
                break;
            }
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return -1;
        }
    } else {
        if (n->namespaces[nsid - 1]) {
            error_setg(errp, "namespace id '%d' is already in use", nsid);
            return -1;
        }
    }

    trace_pci_nvme_register_namespace(nsid);

    /*
     * If subsys is not given, the namespace is always attached to the
     * controller because there's no subsystem to manage namespace
     * allocation.
     */
    if (!n->subsys) {
        if (ns->params.detached) {
            error_setg(errp,
                       "detached requires that the nvme device is "
                       "linked to an nvme-subsys device");
            return -1;
        }

        return nvme_attach_namespace(n, ns, errp);
    } else {
        if (!ns->params.detached) {
            return nvme_attach_namespace(n, ns, errp);
        }
    }

    n->dmrsl = MIN_NON_ZERO(n->dmrsl,
                            BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));

    return 0;
}
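/*
 * dmrsl caps Dataset Management range sizes to what the block layer can
 * accept in a single request: a namespace formatted with 512-byte LBAs
 * contributes BDRV_REQUEST_MAX_BYTES / 512 LBAs per range, and the
 * controller keeps the minimum across all registered namespaces.
 */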
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    uint64_t cmb_size = n->params.cmb_size_mb * MiB;

    n->cmb.buf = g_malloc0(cmb_size);
    memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", cmb_size);
    pci_register_bar(pci_dev, NVME_CMB_BIR,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);

    NVME_CAP_SET_CMBS(n->bar.cap, 1);

    if (n->params.legacy_cmb) {
        nvme_cmb_enable_regs(n);
        n->cmb.cmse = true;
    }
}
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);

    memory_region_set_enabled(&n->pmr.dev->mr, false);
}
static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;
    uint64_t bar_size, msix_table_size, msix_pba_size;
    unsigned msix_table_offset, msix_pba_offset;
    int ret;

    Error *err = NULL;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB);
    msix_table_offset = bar_size;
    msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize;

    bar_size += msix_table_size;
    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
    msix_pba_offset = bar_size;
    msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8;

    bar_size += msix_pba_size;
    bar_size = pow2ceil(bar_size);

    memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    memory_region_add_subregion(&n->bar0, 0, &n->iomem);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
    ret = msix_init(pci_dev, n->params.msix_qsize,
                    &n->bar0, 0, msix_table_offset,
                    &n->bar0, 0, msix_pba_offset, 0, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
        } else {
            error_propagate(errp, err);
            return ret;
        }
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

    if (n->pmr.dev) {
        nvme_init_pmr(n, pci_dev);
    }

    return 0;
}
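/*
 * Resulting BAR0 layout, e.g. for the defaults (max_ioqpairs=64,
 * msix_qsize=65): the register file and doorbells come first, the MSI-X
 * table (65 * PCI_MSIX_ENTRY_SIZE = 1040 bytes) starts at the next 4 KiB
 * boundary, the PBA at the 4 KiB boundary after that, and the whole BAR is
 * then rounded up to a power of two.
 */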
static void nvme_init_subnqn(NvmeCtrl *n)
{
    NvmeSubsystem *subsys = n->subsys;
    NvmeIdCtrl *id = &n->id_ctrl;

    if (!subsys) {
        snprintf((char *)id->subnqn, sizeof(id->subnqn),
                 "nqn.2019-08.org.qemu:%s", n->params.serial);
    } else {
        pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
    }
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->cntlid = cpu_to_le16(n->cntlid);

    id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);

    id->rab = 6;

    if (n->params.use_intel_id) {
        id->ieee[0] = 0xb3;
        id->ieee[1] = 0x02;
        id->ieee[2] = 0x00;
    } else {
        id->ieee[0] = 0x00;
        id->ieee[1] = 0x54;
        id->ieee[2] = 0x52;
    }

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT);
    id->cntrltype = 0x1;

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it
     * is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE | NVME_ONCS_COPY);

    /*
     * NOTE: If this device ever supports a command set that does NOT use 0x0
     * as a Flush-equivalent operation, support for the broadcast NSID in
     * Flush should probably be removed.
     *
     * See comment in nvme_io_cmd.
     */
    id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;

    id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0);
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    nvme_init_subnqn(n);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    if (n->subsys) {
        id->cmic |= NVME_CMIC_MULTI_CTRL;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
    NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
    NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
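/*
 * The SQES/CQES encodings above pack the maximum and minimum entry sizes
 * as powers of two in the high and low nibbles: (0x6 << 4) | 0x6 fixes
 * submission entries at 2^6 = 64 bytes, and (0x4 << 4) | 0x4 fixes
 * completion entries at 2^4 = 16 bytes.
 */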
static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
{
    int cntlid;

    if (!n->subsys) {
        return 0;
    }

    cntlid = nvme_subsys_register_ctrl(n, errp);
    if (cntlid < 0) {
        return -1;
    }

    n->cntlid = cntlid;

    return 0;
}
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    if (nvme_init_pci(n, pci_dev, errp)) {
        return;
    }

    if (nvme_init_subsys(n, errp)) {
        return;
    }
    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(ns, errp)) {
            return;
        }

        if (nvme_register_namespace(n, ns, errp)) {
            return;
        }
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    int i;

    nvme_ctrl_shutdown(n);

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_cleanup(ns);
    }

    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmb.buf);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
                     NvmeSubsystem *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
    DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(n->bar.cap)) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event) {
            nvme_smart_event(n, event);
        }
    }
}
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    if (n->namespace.blkconf.blk) {
        device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                      "bootindex", "/namespace@1,0",
                                      DEVICE(obj));
    }

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init    = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)