/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 *
 * Notes on coding style
 * ---------------------
 * While QEMU coding style prefers lowercase hexadecimals in constants, the
 * NVMe subsystem uses the format from the NVMe specifications in the comments
 * (i.e. 'h' suffix instead of '0x' prefix).
 *
 * Usage
 * -----
 * See docs/system/nvme.rst for extensive documentation.
 *
 * Add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>,vsl=<N[optional]>, \
 *              zoned.zasl=<N[optional]>, \
 *              zoned.auto_transition=<on|off[optional]>, \
 *              subsys=<subsys_id>
 *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
 *              zoned=<true|false[optional]>, \
 *              subsys=<subsys_id>,detached=<true|false[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
 * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * The PMR will use BAR 4/5 exclusively.
 *
 * To place controller(s) and namespace(s) in a subsystem, provide an
 * nvme-subsys device as above.
 *
 * nvme subsystem device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `nqn`
 *   This parameter provides the `<nqn_id>` part of the string
 *   `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
 *   of subsystem controllers. Note that `<nqn_id>` should be unique per
 *   subsystem, but this is not enforced by QEMU. If not specified, it will
 *   default to the value of the `id` parameter (`<subsys_id>`).
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `subsys`
 *   Specifying this parameter attaches the controller to the subsystem and
 *   the SUBNQN field in the controller will report the NQN of the subsystem
 *   device. This also enables multi controller capability represented in
 *   Identify Controller data structure in CMIC (Controller Multi-path I/O and
 *   Namespace Sharing Capabilities).
 *
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 *
 * - `mdts`
 *   Indicates the maximum data transfer size for a command that transfers data
 *   between host-accessible memory and the controller. The value is specified
 *   as a power of two (2^n) and is in units of the minimum memory page size
 *   (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
 *
 * - `vsl`
 *   Indicates the maximum data size limit for the Verify command. Like `mdts`,
 *   this value is specified as a power of two (2^n) and is in units of the
 *   minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
 *   KiB).
 *
 * - `zoned.zasl`
 *   Indicates the maximum data transfer size for the Zone Append command. Like
 *   `mdts`, the value is specified as a power of two (2^n) and is in units of
 *   the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
 *   defaulting to the value of `mdts`).
 *
 * - `zoned.auto_transition`
 *   Indicates if zones in zone state implicitly opened can be automatically
 *   transitioned to zone state closed for resource management purposes.
 *
 * nvme namespace device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * - `shared`
 *   When the parent nvme device (as defined explicitly by the 'bus' parameter
 *   or implicitly by the most recently defined NvmeBus) is linked to an
 *   nvme-subsys device, the namespace will be attached to all controllers in
 *   the subsystem. If set to 'off' (the default), the namespace will remain a
 *   private namespace and may only be attached to a single controller at a
 *   time.
 *
 * - `detached`
 *   This parameter is only valid together with the `subsys` parameter. If left
 *   at the default value (`false/off`), the namespace will be attached to all
 *   controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
 *   namespace will be available in the subsystem but not attached to any
 *   controller.
 *
 * Setting `zoned` to true selects Zoned Command Set at the namespace.
 * In this case, the following namespace properties are available to configure
 * zoned operation:
 *     zoned.zone_size=<zone size in bytes, default: 128MiB>
 *         The number may be followed by K, M, G as in kilo-, mega- or giga-.
 *
 *     zoned.zone_capacity=<zone capacity in bytes, default: zone size>
 *         The value 0 (default) forces zone capacity to be the same as zone
 *         size. The value of this property may not exceed zone size.
 *
 *     zoned.descr_ext_size=<zone descriptor extension size, default 0>
 *         This value needs to be specified in 64B units. If it is zero,
 *         namespace(s) will not support zone descriptor extensions.
 *
 *     zoned.max_active=<Maximum Active Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently active zones.
 *
 *     zoned.max_open=<Maximum Open Resources (zones), default: 0>
 *         The default value means there is no limit to the number of
 *         concurrently open zones.
 *
 *     zoned.cross_read=<enable RAZB, default: false>
 *         Setting this property to true enables Read Across Zone Boundaries.
 */
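/*
 * Illustrative example (hypothetical values, not defaults): a zoned namespace
 * with 64 MiB zones and at most 16 open/active zones, attached to a
 * controller in a subsystem:
 *
 *      -drive file=<file>,if=none,id=nvm0
 *      -device nvme-subsys,id=subsys0,nqn=subsys0
 *      -device nvme,serial=deadbeef,id=nvme0,subsys=subsys0
 *      -device nvme-ns,drive=nvm0,bus=nvme0,nsid=1,zoned=true, \
 *              zoned.zone_size=64M,zoned.max_open=16,zoned.max_active=16
 */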
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/hostmem.h"
#include "hw/pci/msix.h"
#include "migration/vmstate.h"

#include "nvme.h"
#include "trace.h"
#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE  4
#define NVME_SPEC_VER 0x00010400
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 4
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1
#define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)
static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
    [NVME_COMMAND_SET_PROFILE]      = true,
};
static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_ERROR_RECOVERY]           = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
    [NVME_COMMAND_SET_PROFILE]      = NVME_FEAT_CAP_CHANGE,
};
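/*
 * Commands Supported and Effects tables, indexed by opcode. nvme_cse_acs
 * covers the admin command set; the nvme_cse_iocs_* tables hold the I/O
 * command effects reported for a namespace depending on the command set it
 * uses (none, NVM or zoned).
 */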
static const uint32_t nvme_cse_acs[256] = {
    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
    [NVME_ADM_CMD_NS_ATTACHMENT]    = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
    [NVME_ADM_CMD_FORMAT_NVM]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
};
static const uint32_t nvme_cse_iocs_none[256];
static const uint32_t nvme_cse_iocs_nvm[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
};
static const uint32_t nvme_cse_iocs_zoned[256] = {
    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_VERIFY]               = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_COPY]                 = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
    [NVME_CMD_ZONE_APPEND]          = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_SEND]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
    [NVME_CMD_ZONE_MGMT_RECV]       = NVME_CMD_EFF_CSUPP,
};
static void nvme_process_sq(void *opaque);

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}
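/*
 * Update the state of a zone and move it to the per-state resource list
 * (exp_open_zones, imp_open_zones, closed_zones or full_zones) that matches
 * the new state.
 */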
static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
                                   NvmeZoneState state)
{
    if (QTAILQ_IN_USE(zone, entry)) {
        switch (nvme_get_zone_state(zone)) {
        case NVME_ZONE_STATE_EXPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_IMPLICITLY_OPEN:
            QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_CLOSED:
            QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
            break;
        case NVME_ZONE_STATE_FULL:
            QTAILQ_REMOVE(&ns->full_zones, zone, entry);
        default:
            ;
        }
    }

    nvme_set_zone_state(zone, state);

    switch (state) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_CLOSED:
        QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry);
        break;
    case NVME_ZONE_STATE_FULL:
        QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry);
    case NVME_ZONE_STATE_READ_ONLY:
        break;
    default:
        zone->d.za = 0;
    }
}
/*
 * Check if we can open a zone without exceeding open/active limits.
 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
 */
static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
{
    if (ns->params.max_active_zones != 0 &&
        ns->nr_active_zones + act > ns->params.max_active_zones) {
        trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
        return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
    }

    if (ns->params.max_open_zones != 0 &&
        ns->nr_open_zones + opn > ns->params.max_open_zones) {
        trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
        return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
    }

    return NVME_SUCCESS;
}
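/*
 * Helpers for checking whether a guest physical address falls inside the
 * Controller Memory Buffer (CMB) or the Persistent Memory Region (PMR) and
 * for translating such addresses into direct host pointers.
 */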
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi, lo;

    if (!n->cmb.cmse) {
        return false;
    }

    lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
    hi = lo + int128_get64(n->cmb.mem.size);

    return addr >= lo && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;

    return &n->cmb.buf[addr - base];
}

static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
{
    hwaddr hi;

    if (!n->pmr.cmse) {
        return false;
    }

    hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);

    return addr >= n->pmr.cba && addr < hi;
}

static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
{
    return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
}
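/*
 * Read or write a guest buffer. Accesses that fall entirely within the CMB
 * or the PMR are served with a plain memcpy(); anything else goes through
 * PCI DMA.
 */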
static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(buf, nvme_addr_to_pmr(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}

static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;

    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(nvme_addr_to_cmb(n, addr), buf, size);
        return 0;
    }

    if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
        memcpy(nvme_addr_to_pmr(n, addr), buf, size);
        return 0;
    }

    return pci_dma_write(&n->parent_obj, addr, buf, size);
}
static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid &&
        (nsid == NVME_NSID_BROADCAST || nsid <= NVME_MAX_NAMESPACES);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}
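/*
 * Interrupt handling. When MSI-X is enabled, notifications are delivered
 * directly from nvme_irq_assert(); otherwise the asserted completion queue
 * vectors are tracked in n->irq_status and the pin-based interrupt is
 * (de)asserted by nvme_irq_check().
 */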
static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->vector < 32);
            if (!n->cq_pending) {
                n->irq_status &= ~(1 << cq->vector);
            }
            nvme_irq_check(n);
        }
    }
}
static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    req->opaque = NULL;
    req->aiocb = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
{
    if (dma) {
        pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
        sg->flags = NVME_SG_DMA;
    } else {
        qemu_iovec_init(&sg->iov, 0);
    }

    sg->flags |= NVME_SG_ALLOC;
}

static inline void nvme_sg_unmap(NvmeSg *sg)
{
    if (!(sg->flags & NVME_SG_ALLOC)) {
        return;
    }

    if (sg->flags & NVME_SG_DMA) {
        qemu_sglist_destroy(&sg->qsg);
    } else {
        qemu_iovec_destroy(&sg->iov);
    }

    memset(sg, 0x0, sizeof(*sg));
}
/*
 * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
 * holds both data and metadata. This function splits the data and metadata
 * into two separate QSG/IOVs.
 */
static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
                          NvmeSg *mdata)
{
    NvmeSg *dst = data;
    uint32_t trans_len, count = ns->lbasz;
    uint64_t offset = 0;
    bool dma = sg->flags & NVME_SG_DMA;
    size_t sge_len;
    size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
    int sg_idx = 0;

    assert(sg->flags & NVME_SG_ALLOC);

    while (sg_len) {
        sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;

        trans_len = MIN(sg_len, count);
        trans_len = MIN(trans_len, sge_len - offset);

        if (dst) {
            if (dma) {
                qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
                                trans_len);
            } else {
                qemu_iovec_add(&dst->iov,
                               sg->iov.iov[sg_idx].iov_base + offset,
                               trans_len);
            }
        }

        sg_len -= trans_len;
        count -= trans_len;
        offset += trans_len;

        if (count == 0) {
            dst = (dst == data) ? mdata : data;
            count = (dst == data) ? ns->lbasz : ns->lbaf.ms;
        }

        if (sge_len == offset) {
            offset = 0;
            sg_idx++;
        }
    }
}
static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}

static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);

    return NVME_SUCCESS;
}
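/*
 * Add the region [addr, addr + len) to the scatter/gather accumulator. CMB
 * and PMR addresses are added to the iovec; everything else is added to the
 * DMA QEMUSGList. Mixing direct (CMB/PMR) and DMA addresses within a single
 * mapping is rejected.
 */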
static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
{
    bool cmb = false, pmr = false;

    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        cmb = true;
    } else if (nvme_addr_is_pmr(n, addr)) {
        pmr = true;
    }

    if (cmb || pmr) {
        if (sg->flags & NVME_SG_DMA) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        if (sg->iov.niov + 1 > IOV_MAX) {
            goto max_mappings_exceeded;
        }

        if (cmb) {
            return nvme_map_addr_cmb(n, &sg->iov, addr, len);
        } else {
            return nvme_map_addr_pmr(n, &sg->iov, addr, len);
        }
    }

    if (!(sg->flags & NVME_SG_DMA)) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    if (sg->qsg.nsg + 1 > IOV_MAX) {
        goto max_mappings_exceeded;
    }

    qemu_sglist_add(&sg->qsg, addr, len);

    return NVME_SUCCESS;

max_mappings_exceeded:
    NVME_GUEST_ERR(pci_nvme_ub_too_many_mappings,
                   "number of mappings exceed 1024");
    return NVME_INTERNAL_DEV_ERROR | NVME_DNR;
}

static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
{
    return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
}
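/*
 * Map a PRP1/PRP2 pair into an NvmeSg. PRP1 may start at an offset into a
 * page. PRP2 either points at a second data page (for transfers spanning at
 * most two pages) or at a PRP list, which is read in chunks and may chain to
 * further lists for larger transfers.
 */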
static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
                             uint64_t prp2, uint32_t len)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    int ret;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));

    status = nvme_map_addr(n, sg, prp1, trans_len);
    if (status) {
        goto unmap;
    }

    len -= trans_len;
    if (len) {
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            /*
             * The first PRP list entry, pointed to by PRP2, may contain an
             * offset. Hence, we need to calculate the number of entries
             * based on that offset.
             */
            nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == nents - 1 && len > n->page_size) {
                    if (unlikely(prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    nents = MIN(nents, n->max_prp_ents);
                    prp_trans = nents * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        status = NVME_DATA_TRAS_ERROR;
                        goto unmap;
                    }

                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, sg, prp_ent, trans_len);
                if (status) {
                    goto unmap;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
                goto unmap;
            }
            status = nvme_map_addr(n, sg, prp2, len);
            if (status) {
                goto unmap;
            }
        }
    }

    return NVME_SUCCESS;

unmap:
    nvme_sg_unmap(sg);
    return status;
}
758 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
759 * number of bytes mapped in len.
761 static uint16_t nvme_map_sgl_data(NvmeCtrl
*n
, NvmeSg
*sg
,
762 NvmeSglDescriptor
*segment
, uint64_t nsgld
,
763 size_t *len
, NvmeCmd
*cmd
)
765 dma_addr_t addr
, trans_len
;
769 for (int i
= 0; i
< nsgld
; i
++) {
770 uint8_t type
= NVME_SGL_TYPE(segment
[i
].type
);
773 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
774 if (cmd
->opcode
== NVME_CMD_WRITE
) {
777 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
779 case NVME_SGL_DESCR_TYPE_SEGMENT
:
780 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
781 return NVME_INVALID_NUM_SGL_DESCRS
| NVME_DNR
;
783 return NVME_SGL_DESCR_TYPE_INVALID
| NVME_DNR
;
786 dlen
= le32_to_cpu(segment
[i
].len
);
794 * All data has been mapped, but the SGL contains additional
795 * segments and/or descriptors. The controller might accept
796 * ignoring the rest of the SGL.
798 uint32_t sgls
= le32_to_cpu(n
->id_ctrl
.sgls
);
799 if (sgls
& NVME_CTRL_SGLS_EXCESS_LENGTH
) {
803 trace_pci_nvme_err_invalid_sgl_excess_length(dlen
);
804 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
807 trans_len
= MIN(*len
, dlen
);
809 if (type
== NVME_SGL_DESCR_TYPE_BIT_BUCKET
) {
813 addr
= le64_to_cpu(segment
[i
].addr
);
815 if (UINT64_MAX
- addr
< dlen
) {
816 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
819 status
= nvme_map_addr(n
, sg
, addr
, trans_len
);
831 static uint16_t nvme_map_sgl(NvmeCtrl
*n
, NvmeSg
*sg
, NvmeSglDescriptor sgl
,
832 size_t len
, NvmeCmd
*cmd
)
835 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
836 * dynamically allocating a potentially huge SGL. The spec allows the SGL
837 * to be larger (as in number of bytes required to describe the SGL
838 * descriptors and segment chain) than the command transfer size, so it is
839 * not bounded by MDTS.
841 const int SEG_CHUNK_SIZE
= 256;
843 NvmeSglDescriptor segment
[SEG_CHUNK_SIZE
], *sgld
, *last_sgld
;
851 addr
= le64_to_cpu(sgl
.addr
);
853 trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl
.type
), len
);
855 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, addr
));
858 * If the entire transfer can be described with a single data block it can
859 * be mapped directly.
861 if (NVME_SGL_TYPE(sgl
.type
) == NVME_SGL_DESCR_TYPE_DATA_BLOCK
) {
862 status
= nvme_map_sgl_data(n
, sg
, sgld
, 1, &len
, cmd
);
871 switch (NVME_SGL_TYPE(sgld
->type
)) {
872 case NVME_SGL_DESCR_TYPE_SEGMENT
:
873 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT
:
876 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
879 seg_len
= le32_to_cpu(sgld
->len
);
881 /* check the length of the (Last) Segment descriptor */
882 if ((!seg_len
|| seg_len
& 0xf) &&
883 (NVME_SGL_TYPE(sgld
->type
) != NVME_SGL_DESCR_TYPE_BIT_BUCKET
)) {
884 return NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
887 if (UINT64_MAX
- addr
< seg_len
) {
888 return NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
891 nsgld
= seg_len
/ sizeof(NvmeSglDescriptor
);
893 while (nsgld
> SEG_CHUNK_SIZE
) {
894 if (nvme_addr_read(n
, addr
, segment
, sizeof(segment
))) {
895 trace_pci_nvme_err_addr_read(addr
);
896 status
= NVME_DATA_TRAS_ERROR
;
900 status
= nvme_map_sgl_data(n
, sg
, segment
, SEG_CHUNK_SIZE
,
906 nsgld
-= SEG_CHUNK_SIZE
;
907 addr
+= SEG_CHUNK_SIZE
* sizeof(NvmeSglDescriptor
);
910 ret
= nvme_addr_read(n
, addr
, segment
, nsgld
*
911 sizeof(NvmeSglDescriptor
));
913 trace_pci_nvme_err_addr_read(addr
);
914 status
= NVME_DATA_TRAS_ERROR
;
918 last_sgld
= &segment
[nsgld
- 1];
921 * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
924 switch (NVME_SGL_TYPE(last_sgld
->type
)) {
925 case NVME_SGL_DESCR_TYPE_DATA_BLOCK
:
926 case NVME_SGL_DESCR_TYPE_BIT_BUCKET
:
927 status
= nvme_map_sgl_data(n
, sg
, segment
, nsgld
, &len
, cmd
);
939 * If the last descriptor was not a Data Block or Bit Bucket, then the
940 * current segment must not be a Last Segment.
942 if (NVME_SGL_TYPE(sgld
->type
) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT
) {
943 status
= NVME_INVALID_SGL_SEG_DESCR
| NVME_DNR
;
948 addr
= le64_to_cpu(sgld
->addr
);
951 * Do not map the last descriptor; it will be a Segment or Last Segment
952 * descriptor and is handled by the next iteration.
954 status
= nvme_map_sgl_data(n
, sg
, segment
, nsgld
- 1, &len
, cmd
);
961 /* if there is any residual left in len, the SGL was too short */
963 status
= NVME_DATA_SGL_LEN_INVALID
| NVME_DNR
;
974 uint16_t nvme_map_dptr(NvmeCtrl
*n
, NvmeSg
*sg
, size_t len
,
979 switch (NVME_CMD_FLAGS_PSDT(cmd
->flags
)) {
981 prp1
= le64_to_cpu(cmd
->dptr
.prp1
);
982 prp2
= le64_to_cpu(cmd
->dptr
.prp2
);
984 return nvme_map_prp(n
, sg
, prp1
, prp2
, len
);
985 case NVME_PSDT_SGL_MPTR_CONTIGUOUS
:
986 case NVME_PSDT_SGL_MPTR_SGL
:
987 return nvme_map_sgl(n
, sg
, cmd
->dptr
.sgl
, len
, cmd
);
989 return NVME_INVALID_FIELD
;
993 static uint16_t nvme_map_mptr(NvmeCtrl
*n
, NvmeSg
*sg
, size_t len
,
996 int psdt
= NVME_CMD_FLAGS_PSDT(cmd
->flags
);
997 hwaddr mptr
= le64_to_cpu(cmd
->mptr
);
1000 if (psdt
== NVME_PSDT_SGL_MPTR_SGL
) {
1001 NvmeSglDescriptor sgl
;
1003 if (nvme_addr_read(n
, mptr
, &sgl
, sizeof(sgl
))) {
1004 return NVME_DATA_TRAS_ERROR
;
1007 status
= nvme_map_sgl(n
, sg
, sgl
, len
, cmd
);
1008 if (status
&& (status
& 0x7ff) == NVME_DATA_SGL_LEN_INVALID
) {
1009 status
= NVME_MD_SGL_LEN_INVALID
| NVME_DNR
;
1015 nvme_sg_init(n
, sg
, nvme_addr_is_dma(n
, mptr
));
1016 status
= nvme_map_addr(n
, sg
, mptr
, len
);
1024 static uint16_t nvme_map_data(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
1026 NvmeNamespace
*ns
= req
->ns
;
1027 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1028 bool pi
= !!NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
);
1029 bool pract
= !!(le16_to_cpu(rw
->control
) & NVME_RW_PRINFO_PRACT
);
1030 size_t len
= nvme_l2b(ns
, nlb
);
1033 if (nvme_ns_ext(ns
) && !(pi
&& pract
&& ns
->lbaf
.ms
== 8)) {
1036 len
+= nvme_m2b(ns
, nlb
);
1038 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1043 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1044 nvme_sg_split(&sg
, ns
, &req
->sg
, NULL
);
1047 return NVME_SUCCESS
;
1050 return nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1053 static uint16_t nvme_map_mdata(NvmeCtrl
*n
, uint32_t nlb
, NvmeRequest
*req
)
1055 NvmeNamespace
*ns
= req
->ns
;
1056 size_t len
= nvme_m2b(ns
, nlb
);
1059 if (nvme_ns_ext(ns
)) {
1062 len
+= nvme_l2b(ns
, nlb
);
1064 status
= nvme_map_dptr(n
, &sg
, len
, &req
->cmd
);
1069 nvme_sg_init(n
, &req
->sg
, sg
.flags
& NVME_SG_DMA
);
1070 nvme_sg_split(&sg
, ns
, NULL
, &req
->sg
);
1073 return NVME_SUCCESS
;
1076 return nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1079 static uint16_t nvme_tx_interleaved(NvmeCtrl
*n
, NvmeSg
*sg
, uint8_t *ptr
,
1080 uint32_t len
, uint32_t bytes
,
1081 int32_t skip_bytes
, int64_t offset
,
1082 NvmeTxDirection dir
)
1085 uint32_t trans_len
, count
= bytes
;
1086 bool dma
= sg
->flags
& NVME_SG_DMA
;
1091 assert(sg
->flags
& NVME_SG_ALLOC
);
1094 sge_len
= dma
? sg
->qsg
.sg
[sg_idx
].len
: sg
->iov
.iov
[sg_idx
].iov_len
;
1096 if (sge_len
- offset
< 0) {
1102 if (sge_len
== offset
) {
1108 trans_len
= MIN(len
, count
);
1109 trans_len
= MIN(trans_len
, sge_len
- offset
);
1112 addr
= sg
->qsg
.sg
[sg_idx
].base
+ offset
;
1114 addr
= (hwaddr
)(uintptr_t)sg
->iov
.iov
[sg_idx
].iov_base
+ offset
;
1117 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1118 ret
= nvme_addr_read(n
, addr
, ptr
, trans_len
);
1120 ret
= nvme_addr_write(n
, addr
, ptr
, trans_len
);
1124 return NVME_DATA_TRAS_ERROR
;
1130 offset
+= trans_len
;
1134 offset
+= skip_bytes
;
1138 return NVME_SUCCESS
;
1141 static uint16_t nvme_tx(NvmeCtrl
*n
, NvmeSg
*sg
, uint8_t *ptr
, uint32_t len
,
1142 NvmeTxDirection dir
)
1144 assert(sg
->flags
& NVME_SG_ALLOC
);
1146 if (sg
->flags
& NVME_SG_DMA
) {
1149 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1150 residual
= dma_buf_write(ptr
, len
, &sg
->qsg
);
1152 residual
= dma_buf_read(ptr
, len
, &sg
->qsg
);
1155 if (unlikely(residual
)) {
1156 trace_pci_nvme_err_invalid_dma();
1157 return NVME_INVALID_FIELD
| NVME_DNR
;
1162 if (dir
== NVME_TX_DIRECTION_TO_DEVICE
) {
1163 bytes
= qemu_iovec_to_buf(&sg
->iov
, 0, ptr
, len
);
1165 bytes
= qemu_iovec_from_buf(&sg
->iov
, 0, ptr
, len
);
1168 if (unlikely(bytes
!= len
)) {
1169 trace_pci_nvme_err_invalid_dma();
1170 return NVME_INVALID_FIELD
| NVME_DNR
;
1174 return NVME_SUCCESS
;
1177 static inline uint16_t nvme_c2h(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1182 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1187 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_FROM_DEVICE
);
1190 static inline uint16_t nvme_h2c(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1195 status
= nvme_map_dptr(n
, &req
->sg
, len
, &req
->cmd
);
1200 return nvme_tx(n
, &req
->sg
, ptr
, len
, NVME_TX_DIRECTION_TO_DEVICE
);
1203 uint16_t nvme_bounce_data(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1204 NvmeTxDirection dir
, NvmeRequest
*req
)
1206 NvmeNamespace
*ns
= req
->ns
;
1207 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1208 bool pi
= !!NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
);
1209 bool pract
= !!(le16_to_cpu(rw
->control
) & NVME_RW_PRINFO_PRACT
);
1211 if (nvme_ns_ext(ns
) && !(pi
&& pract
&& ns
->lbaf
.ms
== 8)) {
1212 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, ns
->lbasz
,
1213 ns
->lbaf
.ms
, 0, dir
);
1216 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1219 uint16_t nvme_bounce_mdata(NvmeCtrl
*n
, uint8_t *ptr
, uint32_t len
,
1220 NvmeTxDirection dir
, NvmeRequest
*req
)
1222 NvmeNamespace
*ns
= req
->ns
;
1225 if (nvme_ns_ext(ns
)) {
1226 return nvme_tx_interleaved(n
, &req
->sg
, ptr
, len
, ns
->lbaf
.ms
,
1227 ns
->lbasz
, ns
->lbasz
, dir
);
1230 nvme_sg_unmap(&req
->sg
);
1232 status
= nvme_map_mptr(n
, &req
->sg
, len
, &req
->cmd
);
1237 return nvme_tx(n
, &req
->sg
, ptr
, len
, dir
);
1240 static inline void nvme_blk_read(BlockBackend
*blk
, int64_t offset
,
1241 BlockCompletionFunc
*cb
, NvmeRequest
*req
)
1243 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1245 if (req
->sg
.flags
& NVME_SG_DMA
) {
1246 req
->aiocb
= dma_blk_read(blk
, &req
->sg
.qsg
, offset
, BDRV_SECTOR_SIZE
,
1249 req
->aiocb
= blk_aio_preadv(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1253 static inline void nvme_blk_write(BlockBackend
*blk
, int64_t offset
,
1254 BlockCompletionFunc
*cb
, NvmeRequest
*req
)
1256 assert(req
->sg
.flags
& NVME_SG_ALLOC
);
1258 if (req
->sg
.flags
& NVME_SG_DMA
) {
1259 req
->aiocb
= dma_blk_write(blk
, &req
->sg
.qsg
, offset
, BDRV_SECTOR_SIZE
,
1262 req
->aiocb
= blk_aio_pwritev(blk
, offset
, &req
->sg
.iov
, 0, cb
, req
);
1266 static void nvme_post_cqes(void *opaque
)
1268 NvmeCQueue
*cq
= opaque
;
1269 NvmeCtrl
*n
= cq
->ctrl
;
1270 NvmeRequest
*req
, *next
;
1271 bool pending
= cq
->head
!= cq
->tail
;
1274 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
) {
1278 if (nvme_cq_full(cq
)) {
1283 req
->cqe
.status
= cpu_to_le16((req
->status
<< 1) | cq
->phase
);
1284 req
->cqe
.sq_id
= cpu_to_le16(sq
->sqid
);
1285 req
->cqe
.sq_head
= cpu_to_le16(sq
->head
);
1286 addr
= cq
->dma_addr
+ cq
->tail
* n
->cqe_size
;
1287 ret
= pci_dma_write(&n
->parent_obj
, addr
, (void *)&req
->cqe
,
1290 trace_pci_nvme_err_addr_write(addr
);
1291 trace_pci_nvme_err_cfs();
1292 n
->bar
.csts
= NVME_CSTS_FAILED
;
1295 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
1296 nvme_inc_cq_tail(cq
);
1297 nvme_sg_unmap(&req
->sg
);
1298 QTAILQ_INSERT_TAIL(&sq
->req_list
, req
, entry
);
1300 if (cq
->tail
!= cq
->head
) {
1301 if (cq
->irq_enabled
&& !pending
) {
1305 nvme_irq_assert(n
, cq
);
1309 static void nvme_enqueue_req_completion(NvmeCQueue
*cq
, NvmeRequest
*req
)
1311 assert(cq
->cqid
== req
->sq
->cqid
);
1312 trace_pci_nvme_enqueue_req_completion(nvme_cid(req
), cq
->cqid
,
1313 le32_to_cpu(req
->cqe
.result
),
1314 le32_to_cpu(req
->cqe
.dw1
),
1318 trace_pci_nvme_err_req_status(nvme_cid(req
), nvme_nsid(req
->ns
),
1319 req
->status
, req
->cmd
.opcode
);
1322 QTAILQ_REMOVE(&req
->sq
->out_req_list
, req
, entry
);
1323 QTAILQ_INSERT_TAIL(&cq
->req_list
, req
, entry
);
1324 timer_mod(cq
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + 500);
1327 static void nvme_process_aers(void *opaque
)
1329 NvmeCtrl
*n
= opaque
;
1330 NvmeAsyncEvent
*event
, *next
;
1332 trace_pci_nvme_process_aers(n
->aer_queued
);
1334 QTAILQ_FOREACH_SAFE(event
, &n
->aer_queue
, entry
, next
) {
1336 NvmeAerResult
*result
;
1338 /* can't post cqe if there is nothing to complete */
1339 if (!n
->outstanding_aers
) {
1340 trace_pci_nvme_no_outstanding_aers();
1344 /* ignore if masked (cqe posted, but event not cleared) */
1345 if (n
->aer_mask
& (1 << event
->result
.event_type
)) {
1346 trace_pci_nvme_aer_masked(event
->result
.event_type
, n
->aer_mask
);
1350 QTAILQ_REMOVE(&n
->aer_queue
, event
, entry
);
1353 n
->aer_mask
|= 1 << event
->result
.event_type
;
1354 n
->outstanding_aers
--;
1356 req
= n
->aer_reqs
[n
->outstanding_aers
];
1358 result
= (NvmeAerResult
*) &req
->cqe
.result
;
1359 result
->event_type
= event
->result
.event_type
;
1360 result
->event_info
= event
->result
.event_info
;
1361 result
->log_page
= event
->result
.log_page
;
1364 trace_pci_nvme_aer_post_cqe(result
->event_type
, result
->event_info
,
1367 nvme_enqueue_req_completion(&n
->admin_cq
, req
);
1371 static void nvme_enqueue_event(NvmeCtrl
*n
, uint8_t event_type
,
1372 uint8_t event_info
, uint8_t log_page
)
1374 NvmeAsyncEvent
*event
;
1376 trace_pci_nvme_enqueue_event(event_type
, event_info
, log_page
);
1378 if (n
->aer_queued
== n
->params
.aer_max_queued
) {
1379 trace_pci_nvme_enqueue_event_noqueue(n
->aer_queued
);
1383 event
= g_new(NvmeAsyncEvent
, 1);
1384 event
->result
= (NvmeAerResult
) {
1385 .event_type
= event_type
,
1386 .event_info
= event_info
,
1387 .log_page
= log_page
,
1390 QTAILQ_INSERT_TAIL(&n
->aer_queue
, event
, entry
);
1393 nvme_process_aers(n
);
1396 static void nvme_smart_event(NvmeCtrl
*n
, uint8_t event
)
1400 /* Ref SPEC <Asynchronous Event Information 0x2013 SMART / Health Status> */
1401 if (!(NVME_AEC_SMART(n
->features
.async_config
) & event
)) {
1406 case NVME_SMART_SPARE
:
1407 aer_info
= NVME_AER_INFO_SMART_SPARE_THRESH
;
1409 case NVME_SMART_TEMPERATURE
:
1410 aer_info
= NVME_AER_INFO_SMART_TEMP_THRESH
;
1412 case NVME_SMART_RELIABILITY
:
1413 case NVME_SMART_MEDIA_READ_ONLY
:
1414 case NVME_SMART_FAILED_VOLATILE_MEDIA
:
1415 case NVME_SMART_PMR_UNRELIABLE
:
1416 aer_info
= NVME_AER_INFO_SMART_RELIABILITY
;
1422 nvme_enqueue_event(n
, NVME_AER_TYPE_SMART
, aer_info
, NVME_LOG_SMART_INFO
);
1425 static void nvme_clear_events(NvmeCtrl
*n
, uint8_t event_type
)
1427 n
->aer_mask
&= ~(1 << event_type
);
1428 if (!QTAILQ_EMPTY(&n
->aer_queue
)) {
1429 nvme_process_aers(n
);
1433 static inline uint16_t nvme_check_mdts(NvmeCtrl
*n
, size_t len
)
1435 uint8_t mdts
= n
->params
.mdts
;
1437 if (mdts
&& len
> n
->page_size
<< mdts
) {
1438 trace_pci_nvme_err_mdts(len
);
1439 return NVME_INVALID_FIELD
| NVME_DNR
;
1442 return NVME_SUCCESS
;
1445 static inline uint16_t nvme_check_bounds(NvmeNamespace
*ns
, uint64_t slba
,
1448 uint64_t nsze
= le64_to_cpu(ns
->id_ns
.nsze
);
1450 if (unlikely(UINT64_MAX
- slba
< nlb
|| slba
+ nlb
> nsze
)) {
1451 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
, nsze
);
1452 return NVME_LBA_RANGE
| NVME_DNR
;
1455 return NVME_SUCCESS
;
1458 static int nvme_block_status_all(NvmeNamespace
*ns
, uint64_t slba
,
1459 uint32_t nlb
, int flags
)
1461 BlockDriverState
*bs
= blk_bs(ns
->blkconf
.blk
);
1463 int64_t pnum
= 0, bytes
= nvme_l2b(ns
, nlb
);
1464 int64_t offset
= nvme_l2b(ns
, slba
);
1468 * `pnum` holds the number of bytes after offset that shares the same
1469 * allocation status as the byte at offset. If `pnum` is different from
1470 * `bytes`, we should check the allocation status of the next range and
1471 * continue this until all bytes have been checked.
1476 ret
= bdrv_block_status(bs
, offset
, bytes
, &pnum
, NULL
, NULL
);
1482 trace_pci_nvme_block_status(offset
, bytes
, pnum
, ret
,
1483 !!(ret
& BDRV_BLOCK_ZERO
));
1485 if (!(ret
& flags
)) {
1490 } while (pnum
!= bytes
);
1495 static uint16_t nvme_check_dulbe(NvmeNamespace
*ns
, uint64_t slba
,
1501 ret
= nvme_block_status_all(ns
, slba
, nlb
, BDRV_BLOCK_DATA
);
1504 error_setg_errno(&err
, -ret
, "unable to get block status");
1505 error_report_err(err
);
1507 return NVME_INTERNAL_DEV_ERROR
;
1513 return NVME_SUCCESS
;
1516 static void nvme_aio_err(NvmeRequest
*req
, int ret
)
1518 uint16_t status
= NVME_SUCCESS
;
1519 Error
*local_err
= NULL
;
1521 switch (req
->cmd
.opcode
) {
1523 status
= NVME_UNRECOVERED_READ
;
1525 case NVME_CMD_FLUSH
:
1526 case NVME_CMD_WRITE
:
1527 case NVME_CMD_WRITE_ZEROES
:
1528 case NVME_CMD_ZONE_APPEND
:
1529 status
= NVME_WRITE_FAULT
;
1532 status
= NVME_INTERNAL_DEV_ERROR
;
1536 trace_pci_nvme_err_aio(nvme_cid(req
), strerror(-ret
), status
);
1538 error_setg_errno(&local_err
, -ret
, "aio failed");
1539 error_report_err(local_err
);
1542 * Set the command status code to the first encountered error but allow a
1543 * subsequent Internal Device Error to trump it.
1545 if (req
->status
&& status
!= NVME_INTERNAL_DEV_ERROR
) {
1549 req
->status
= status
;
1552 static inline uint32_t nvme_zone_idx(NvmeNamespace
*ns
, uint64_t slba
)
1554 return ns
->zone_size_log2
> 0 ? slba
>> ns
->zone_size_log2
:
1555 slba
/ ns
->zone_size
;
1558 static inline NvmeZone
*nvme_get_zone_by_slba(NvmeNamespace
*ns
, uint64_t slba
)
1560 uint32_t zone_idx
= nvme_zone_idx(ns
, slba
);
1562 if (zone_idx
>= ns
->num_zones
) {
1566 return &ns
->zone_array
[zone_idx
];
1569 static uint16_t nvme_check_zone_state_for_write(NvmeZone
*zone
)
1571 uint64_t zslba
= zone
->d
.zslba
;
1573 switch (nvme_get_zone_state(zone
)) {
1574 case NVME_ZONE_STATE_EMPTY
:
1575 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1576 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1577 case NVME_ZONE_STATE_CLOSED
:
1578 return NVME_SUCCESS
;
1579 case NVME_ZONE_STATE_FULL
:
1580 trace_pci_nvme_err_zone_is_full(zslba
);
1581 return NVME_ZONE_FULL
;
1582 case NVME_ZONE_STATE_OFFLINE
:
1583 trace_pci_nvme_err_zone_is_offline(zslba
);
1584 return NVME_ZONE_OFFLINE
;
1585 case NVME_ZONE_STATE_READ_ONLY
:
1586 trace_pci_nvme_err_zone_is_read_only(zslba
);
1587 return NVME_ZONE_READ_ONLY
;
1592 return NVME_INTERNAL_DEV_ERROR
;
1595 static uint16_t nvme_check_zone_write(NvmeNamespace
*ns
, NvmeZone
*zone
,
1596 uint64_t slba
, uint32_t nlb
)
1598 uint64_t zcap
= nvme_zone_wr_boundary(zone
);
1601 status
= nvme_check_zone_state_for_write(zone
);
1606 if (unlikely(slba
!= zone
->w_ptr
)) {
1607 trace_pci_nvme_err_write_not_at_wp(slba
, zone
->d
.zslba
, zone
->w_ptr
);
1608 return NVME_ZONE_INVALID_WRITE
;
1611 if (unlikely((slba
+ nlb
) > zcap
)) {
1612 trace_pci_nvme_err_zone_boundary(slba
, nlb
, zcap
);
1613 return NVME_ZONE_BOUNDARY_ERROR
;
1616 return NVME_SUCCESS
;
1619 static uint16_t nvme_check_zone_state_for_read(NvmeZone
*zone
)
1621 switch (nvme_get_zone_state(zone
)) {
1622 case NVME_ZONE_STATE_EMPTY
:
1623 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1624 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1625 case NVME_ZONE_STATE_FULL
:
1626 case NVME_ZONE_STATE_CLOSED
:
1627 case NVME_ZONE_STATE_READ_ONLY
:
1628 return NVME_SUCCESS
;
1629 case NVME_ZONE_STATE_OFFLINE
:
1630 trace_pci_nvme_err_zone_is_offline(zone
->d
.zslba
);
1631 return NVME_ZONE_OFFLINE
;
1636 return NVME_INTERNAL_DEV_ERROR
;
1639 static uint16_t nvme_check_zone_read(NvmeNamespace
*ns
, uint64_t slba
,
1643 uint64_t bndry
, end
;
1646 zone
= nvme_get_zone_by_slba(ns
, slba
);
1649 bndry
= nvme_zone_rd_boundary(ns
, zone
);
1652 status
= nvme_check_zone_state_for_read(zone
);
1655 } else if (unlikely(end
> bndry
)) {
1656 if (!ns
->params
.cross_zone_read
) {
1657 status
= NVME_ZONE_BOUNDARY_ERROR
;
1660 * Read across zone boundary - check that all subsequent
1661 * zones that are being read have an appropriate state.
1665 status
= nvme_check_zone_state_for_read(zone
);
1669 } while (end
> nvme_zone_rd_boundary(ns
, zone
));
1676 static uint16_t nvme_zrm_finish(NvmeNamespace
*ns
, NvmeZone
*zone
)
1678 switch (nvme_get_zone_state(zone
)) {
1679 case NVME_ZONE_STATE_FULL
:
1680 return NVME_SUCCESS
;
1682 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1683 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1684 nvme_aor_dec_open(ns
);
1686 case NVME_ZONE_STATE_CLOSED
:
1687 nvme_aor_dec_active(ns
);
1689 case NVME_ZONE_STATE_EMPTY
:
1690 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_FULL
);
1691 return NVME_SUCCESS
;
1694 return NVME_ZONE_INVAL_TRANSITION
;
1698 static uint16_t nvme_zrm_close(NvmeNamespace
*ns
, NvmeZone
*zone
)
1700 switch (nvme_get_zone_state(zone
)) {
1701 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1702 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1703 nvme_aor_dec_open(ns
);
1704 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_CLOSED
);
1706 case NVME_ZONE_STATE_CLOSED
:
1707 return NVME_SUCCESS
;
1710 return NVME_ZONE_INVAL_TRANSITION
;
1714 static uint16_t nvme_zrm_reset(NvmeNamespace
*ns
, NvmeZone
*zone
)
1716 switch (nvme_get_zone_state(zone
)) {
1717 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1718 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1719 nvme_aor_dec_open(ns
);
1721 case NVME_ZONE_STATE_CLOSED
:
1722 nvme_aor_dec_active(ns
);
1724 case NVME_ZONE_STATE_FULL
:
1725 zone
->w_ptr
= zone
->d
.zslba
;
1726 zone
->d
.wp
= zone
->w_ptr
;
1727 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EMPTY
);
1729 case NVME_ZONE_STATE_EMPTY
:
1730 return NVME_SUCCESS
;
1733 return NVME_ZONE_INVAL_TRANSITION
;
1737 static void nvme_zrm_auto_transition_zone(NvmeNamespace
*ns
)
1741 if (ns
->params
.max_open_zones
&&
1742 ns
->nr_open_zones
== ns
->params
.max_open_zones
) {
1743 zone
= QTAILQ_FIRST(&ns
->imp_open_zones
);
1746 * Automatically close this implicitly open zone.
1748 QTAILQ_REMOVE(&ns
->imp_open_zones
, zone
, entry
);
1749 nvme_zrm_close(ns
, zone
);
1755 NVME_ZRM_AUTO
= 1 << 0,
1758 static uint16_t nvme_zrm_open_flags(NvmeCtrl
*n
, NvmeNamespace
*ns
,
1759 NvmeZone
*zone
, int flags
)
1764 switch (nvme_get_zone_state(zone
)) {
1765 case NVME_ZONE_STATE_EMPTY
:
1770 case NVME_ZONE_STATE_CLOSED
:
1771 if (n
->params
.auto_transition_zones
) {
1772 nvme_zrm_auto_transition_zone(ns
);
1774 status
= nvme_aor_check(ns
, act
, 1);
1780 nvme_aor_inc_active(ns
);
1783 nvme_aor_inc_open(ns
);
1785 if (flags
& NVME_ZRM_AUTO
) {
1786 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_IMPLICITLY_OPEN
);
1787 return NVME_SUCCESS
;
1792 case NVME_ZONE_STATE_IMPLICITLY_OPEN
:
1793 if (flags
& NVME_ZRM_AUTO
) {
1794 return NVME_SUCCESS
;
1797 nvme_assign_zone_state(ns
, zone
, NVME_ZONE_STATE_EXPLICITLY_OPEN
);
1801 case NVME_ZONE_STATE_EXPLICITLY_OPEN
:
1802 return NVME_SUCCESS
;
1805 return NVME_ZONE_INVAL_TRANSITION
;
1809 static inline uint16_t nvme_zrm_auto(NvmeCtrl
*n
, NvmeNamespace
*ns
,
1812 return nvme_zrm_open_flags(n
, ns
, zone
, NVME_ZRM_AUTO
);
1815 static inline uint16_t nvme_zrm_open(NvmeCtrl
*n
, NvmeNamespace
*ns
,
1818 return nvme_zrm_open_flags(n
, ns
, zone
, 0);
1821 static void nvme_advance_zone_wp(NvmeNamespace
*ns
, NvmeZone
*zone
,
1826 if (zone
->d
.wp
== nvme_zone_wr_boundary(zone
)) {
1827 nvme_zrm_finish(ns
, zone
);
1831 static void nvme_finalize_zoned_write(NvmeNamespace
*ns
, NvmeRequest
*req
)
1833 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1838 slba
= le64_to_cpu(rw
->slba
);
1839 nlb
= le16_to_cpu(rw
->nlb
) + 1;
1840 zone
= nvme_get_zone_by_slba(ns
, slba
);
1843 nvme_advance_zone_wp(ns
, zone
, nlb
);
1846 static inline bool nvme_is_write(NvmeRequest
*req
)
1848 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1850 return rw
->opcode
== NVME_CMD_WRITE
||
1851 rw
->opcode
== NVME_CMD_ZONE_APPEND
||
1852 rw
->opcode
== NVME_CMD_WRITE_ZEROES
;
1855 static AioContext
*nvme_get_aio_context(BlockAIOCB
*acb
)
1857 return qemu_get_aio_context();
1860 static void nvme_misc_cb(void *opaque
, int ret
)
1862 NvmeRequest
*req
= opaque
;
1864 trace_pci_nvme_misc_cb(nvme_cid(req
));
1867 nvme_aio_err(req
, ret
);
1870 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1873 void nvme_rw_complete_cb(void *opaque
, int ret
)
1875 NvmeRequest
*req
= opaque
;
1876 NvmeNamespace
*ns
= req
->ns
;
1877 BlockBackend
*blk
= ns
->blkconf
.blk
;
1878 BlockAcctCookie
*acct
= &req
->acct
;
1879 BlockAcctStats
*stats
= blk_get_stats(blk
);
1881 trace_pci_nvme_rw_complete_cb(nvme_cid(req
), blk_name(blk
));
1884 block_acct_failed(stats
, acct
);
1885 nvme_aio_err(req
, ret
);
1887 block_acct_done(stats
, acct
);
1890 if (ns
->params
.zoned
&& nvme_is_write(req
)) {
1891 nvme_finalize_zoned_write(ns
, req
);
1894 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1897 static void nvme_rw_cb(void *opaque
, int ret
)
1899 NvmeRequest
*req
= opaque
;
1900 NvmeNamespace
*ns
= req
->ns
;
1902 BlockBackend
*blk
= ns
->blkconf
.blk
;
1904 trace_pci_nvme_rw_cb(nvme_cid(req
), blk_name(blk
));
1911 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1912 uint64_t slba
= le64_to_cpu(rw
->slba
);
1913 uint32_t nlb
= (uint32_t)le16_to_cpu(rw
->nlb
) + 1;
1914 uint64_t offset
= nvme_moff(ns
, slba
);
1916 if (req
->cmd
.opcode
== NVME_CMD_WRITE_ZEROES
) {
1917 size_t mlen
= nvme_m2b(ns
, nlb
);
1919 req
->aiocb
= blk_aio_pwrite_zeroes(blk
, offset
, mlen
,
1921 nvme_rw_complete_cb
, req
);
1925 if (nvme_ns_ext(ns
) || req
->cmd
.mptr
) {
1928 nvme_sg_unmap(&req
->sg
);
1929 status
= nvme_map_mdata(nvme_ctrl(req
), nlb
, req
);
1935 if (req
->cmd
.opcode
== NVME_CMD_READ
) {
1936 return nvme_blk_read(blk
, offset
, nvme_rw_complete_cb
, req
);
1939 return nvme_blk_write(blk
, offset
, nvme_rw_complete_cb
, req
);
1944 nvme_rw_complete_cb(req
, ret
);
1947 static void nvme_verify_cb(void *opaque
, int ret
)
1949 NvmeBounceContext
*ctx
= opaque
;
1950 NvmeRequest
*req
= ctx
->req
;
1951 NvmeNamespace
*ns
= req
->ns
;
1952 BlockBackend
*blk
= ns
->blkconf
.blk
;
1953 BlockAcctCookie
*acct
= &req
->acct
;
1954 BlockAcctStats
*stats
= blk_get_stats(blk
);
1955 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
1956 uint64_t slba
= le64_to_cpu(rw
->slba
);
1957 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
1958 uint16_t apptag
= le16_to_cpu(rw
->apptag
);
1959 uint16_t appmask
= le16_to_cpu(rw
->appmask
);
1960 uint32_t reftag
= le32_to_cpu(rw
->reftag
);
1963 trace_pci_nvme_verify_cb(nvme_cid(req
), prinfo
, apptag
, appmask
, reftag
);
1966 block_acct_failed(stats
, acct
);
1967 nvme_aio_err(req
, ret
);
1971 block_acct_done(stats
, acct
);
1973 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
1974 status
= nvme_dif_mangle_mdata(ns
, ctx
->mdata
.bounce
,
1975 ctx
->mdata
.iov
.size
, slba
);
1977 req
->status
= status
;
1981 req
->status
= nvme_dif_check(ns
, ctx
->data
.bounce
, ctx
->data
.iov
.size
,
1982 ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
,
1983 prinfo
, slba
, apptag
, appmask
, &reftag
);
1987 qemu_iovec_destroy(&ctx
->data
.iov
);
1988 g_free(ctx
->data
.bounce
);
1990 qemu_iovec_destroy(&ctx
->mdata
.iov
);
1991 g_free(ctx
->mdata
.bounce
);
1995 nvme_enqueue_req_completion(nvme_cq(req
), req
);
1999 static void nvme_verify_mdata_in_cb(void *opaque
, int ret
)
2001 NvmeBounceContext
*ctx
= opaque
;
2002 NvmeRequest
*req
= ctx
->req
;
2003 NvmeNamespace
*ns
= req
->ns
;
2004 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2005 uint64_t slba
= le64_to_cpu(rw
->slba
);
2006 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2007 size_t mlen
= nvme_m2b(ns
, nlb
);
2008 uint64_t offset
= nvme_moff(ns
, slba
);
2009 BlockBackend
*blk
= ns
->blkconf
.blk
;
2011 trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req
), blk_name(blk
));
2017 ctx
->mdata
.bounce
= g_malloc(mlen
);
2019 qemu_iovec_reset(&ctx
->mdata
.iov
);
2020 qemu_iovec_add(&ctx
->mdata
.iov
, ctx
->mdata
.bounce
, mlen
);
2022 req
->aiocb
= blk_aio_preadv(blk
, offset
, &ctx
->mdata
.iov
, 0,
2023 nvme_verify_cb
, ctx
);
2027 nvme_verify_cb(ctx
, ret
);
2030 struct nvme_compare_ctx
{
2042 static void nvme_compare_mdata_cb(void *opaque
, int ret
)
2044 NvmeRequest
*req
= opaque
;
2045 NvmeNamespace
*ns
= req
->ns
;
2046 NvmeCtrl
*n
= nvme_ctrl(req
);
2047 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2048 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
2049 uint16_t apptag
= le16_to_cpu(rw
->apptag
);
2050 uint16_t appmask
= le16_to_cpu(rw
->appmask
);
2051 uint32_t reftag
= le32_to_cpu(rw
->reftag
);
2052 struct nvme_compare_ctx
*ctx
= req
->opaque
;
2053 g_autofree
uint8_t *buf
= NULL
;
2054 BlockBackend
*blk
= ns
->blkconf
.blk
;
2055 BlockAcctCookie
*acct
= &req
->acct
;
2056 BlockAcctStats
*stats
= blk_get_stats(blk
);
2057 uint16_t status
= NVME_SUCCESS
;
2059 trace_pci_nvme_compare_mdata_cb(nvme_cid(req
));
2062 block_acct_failed(stats
, acct
);
2063 nvme_aio_err(req
, ret
);
2067 buf
= g_malloc(ctx
->mdata
.iov
.size
);
2069 status
= nvme_bounce_mdata(n
, buf
, ctx
->mdata
.iov
.size
,
2070 NVME_TX_DIRECTION_TO_DEVICE
, req
);
2072 req
->status
= status
;
2076 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2077 uint64_t slba
= le64_to_cpu(rw
->slba
);
2079 uint8_t *mbufp
= ctx
->mdata
.bounce
;
2080 uint8_t *end
= mbufp
+ ctx
->mdata
.iov
.size
;
2083 status
= nvme_dif_check(ns
, ctx
->data
.bounce
, ctx
->data
.iov
.size
,
2084 ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
, prinfo
,
2085 slba
, apptag
, appmask
, &reftag
);
2087 req
->status
= status
;
2092 * When formatted with protection information, do not compare the DIF
2095 if (!(ns
->id_ns
.dps
& NVME_ID_NS_DPS_FIRST_EIGHT
)) {
2096 pil
= ns
->lbaf
.ms
- sizeof(NvmeDifTuple
);
2099 for (bufp
= buf
; mbufp
< end
; bufp
+= ns
->lbaf
.ms
, mbufp
+= ns
->lbaf
.ms
) {
2100 if (memcmp(bufp
+ pil
, mbufp
+ pil
, ns
->lbaf
.ms
- pil
)) {
2101 req
->status
= NVME_CMP_FAILURE
;
2109 if (memcmp(buf
, ctx
->mdata
.bounce
, ctx
->mdata
.iov
.size
)) {
2110 req
->status
= NVME_CMP_FAILURE
;
2114 block_acct_done(stats
, acct
);
2117 qemu_iovec_destroy(&ctx
->data
.iov
);
2118 g_free(ctx
->data
.bounce
);
2120 qemu_iovec_destroy(&ctx
->mdata
.iov
);
2121 g_free(ctx
->mdata
.bounce
);
2125 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2128 static void nvme_compare_data_cb(void *opaque
, int ret
)
2130 NvmeRequest
*req
= opaque
;
2131 NvmeCtrl
*n
= nvme_ctrl(req
);
2132 NvmeNamespace
*ns
= req
->ns
;
2133 BlockBackend
*blk
= ns
->blkconf
.blk
;
2134 BlockAcctCookie
*acct
= &req
->acct
;
2135 BlockAcctStats
*stats
= blk_get_stats(blk
);
2137 struct nvme_compare_ctx
*ctx
= req
->opaque
;
2138 g_autofree
uint8_t *buf
= NULL
;
2141 trace_pci_nvme_compare_data_cb(nvme_cid(req
));
2144 block_acct_failed(stats
, acct
);
2145 nvme_aio_err(req
, ret
);
2149 buf
= g_malloc(ctx
->data
.iov
.size
);
2151 status
= nvme_bounce_data(n
, buf
, ctx
->data
.iov
.size
,
2152 NVME_TX_DIRECTION_TO_DEVICE
, req
);
2154 req
->status
= status
;
2158 if (memcmp(buf
, ctx
->data
.bounce
, ctx
->data
.iov
.size
)) {
2159 req
->status
= NVME_CMP_FAILURE
;
2164 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2165 uint64_t slba
= le64_to_cpu(rw
->slba
);
2166 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2167 size_t mlen
= nvme_m2b(ns
, nlb
);
2168 uint64_t offset
= nvme_moff(ns
, slba
);
2170 ctx
->mdata
.bounce
= g_malloc(mlen
);
2172 qemu_iovec_init(&ctx
->mdata
.iov
, 1);
2173 qemu_iovec_add(&ctx
->mdata
.iov
, ctx
->mdata
.bounce
, mlen
);
2175 req
->aiocb
= blk_aio_preadv(blk
, offset
, &ctx
->mdata
.iov
, 0,
2176 nvme_compare_mdata_cb
, req
);
2180 block_acct_done(stats
, acct
);
2183 qemu_iovec_destroy(&ctx
->data
.iov
);
2184 g_free(ctx
->data
.bounce
);
2187 nvme_enqueue_req_completion(nvme_cq(req
), req
);
2190 typedef struct NvmeDSMAIOCB
{
2197 NvmeDsmRange
*range
;
2202 static void nvme_dsm_cancel(BlockAIOCB
*aiocb
)
2204 NvmeDSMAIOCB
*iocb
= container_of(aiocb
, NvmeDSMAIOCB
, common
);
2206 /* break nvme_dsm_cb loop */
2207 iocb
->idx
= iocb
->nr
;
2208 iocb
->ret
= -ECANCELED
;
2211 blk_aio_cancel_async(iocb
->aiocb
);
2215 * We only reach this if nvme_dsm_cancel() has already been called or
2216 * the command ran to completion and nvme_dsm_bh is scheduled to run.
2218 assert(iocb
->idx
== iocb
->nr
);
2222 static const AIOCBInfo nvme_dsm_aiocb_info
= {
2223 .aiocb_size
= sizeof(NvmeDSMAIOCB
),
2224 .cancel_async
= nvme_dsm_cancel
,
2227 static void nvme_dsm_bh(void *opaque
)
2229 NvmeDSMAIOCB
*iocb
= opaque
;
2231 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
2233 qemu_bh_delete(iocb
->bh
);
2235 qemu_aio_unref(iocb
);
2238 static void nvme_dsm_cb(void *opaque
, int ret
);
2240 static void nvme_dsm_md_cb(void *opaque
, int ret
)
2242 NvmeDSMAIOCB
*iocb
= opaque
;
2243 NvmeRequest
*req
= iocb
->req
;
2244 NvmeNamespace
*ns
= req
->ns
;
2245 NvmeDsmRange
*range
;
2255 nvme_dsm_cb(iocb
, 0);
2259 range
= &iocb
->range
[iocb
->idx
- 1];
2260 slba
= le64_to_cpu(range
->slba
);
2261 nlb
= le32_to_cpu(range
->nlb
);
2264 * Check that all block were discarded (zeroed); otherwise we do not zero
2268 ret
= nvme_block_status_all(ns
, slba
, nlb
, BDRV_BLOCK_ZERO
);
2275 nvme_dsm_cb(iocb
, 0);
2278 iocb
->aiocb
= blk_aio_pwrite_zeroes(ns
->blkconf
.blk
, nvme_moff(ns
, slba
),
2279 nvme_m2b(ns
, nlb
), BDRV_REQ_MAY_UNMAP
,
2285 qemu_bh_schedule(iocb
->bh
);
2288 static void nvme_dsm_cb(void *opaque
, int ret
)
2290 NvmeDSMAIOCB
*iocb
= opaque
;
2291 NvmeRequest
*req
= iocb
->req
;
2292 NvmeCtrl
*n
= nvme_ctrl(req
);
2293 NvmeNamespace
*ns
= req
->ns
;
2294 NvmeDsmRange
*range
;
2304 if (iocb
->idx
== iocb
->nr
) {
2308 range
= &iocb
->range
[iocb
->idx
++];
2309 slba
= le64_to_cpu(range
->slba
);
2310 nlb
= le32_to_cpu(range
->nlb
);
2312 trace_pci_nvme_dsm_deallocate(slba
, nlb
);
2314 if (nlb
> n
->dmrsl
) {
2315 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb
, n
->dmrsl
);
2319 if (nvme_check_bounds(ns
, slba
, nlb
)) {
2320 trace_pci_nvme_err_invalid_lba_range(slba
, nlb
,
2325 iocb
->aiocb
= blk_aio_pdiscard(ns
->blkconf
.blk
, nvme_l2b(ns
, slba
),
2327 nvme_dsm_md_cb
, iocb
);
2332 qemu_bh_schedule(iocb
->bh
);
2335 static uint16_t nvme_dsm(NvmeCtrl
*n
, NvmeRequest
*req
)
2337 NvmeNamespace
*ns
= req
->ns
;
2338 NvmeDsmCmd
*dsm
= (NvmeDsmCmd
*) &req
->cmd
;
2339 uint32_t attr
= le32_to_cpu(dsm
->attributes
);
2340 uint32_t nr
= (le32_to_cpu(dsm
->nr
) & 0xff) + 1;
2341 uint16_t status
= NVME_SUCCESS
;
2343 trace_pci_nvme_dsm(nr
, attr
);
2345 if (attr
& NVME_DSMGMT_AD
) {
2346 NvmeDSMAIOCB
*iocb
= blk_aio_get(&nvme_dsm_aiocb_info
, ns
->blkconf
.blk
,
2350 iocb
->bh
= qemu_bh_new(nvme_dsm_bh
, iocb
);
2352 iocb
->range
= g_new(NvmeDsmRange
, nr
);
2356 status
= nvme_h2c(n
, (uint8_t *)iocb
->range
, sizeof(NvmeDsmRange
) * nr
,
2362 req
->aiocb
= &iocb
->common
;
2363 nvme_dsm_cb(iocb
, 0);
2365 return NVME_NO_COMPLETE
;
2371 static uint16_t nvme_verify(NvmeCtrl
*n
, NvmeRequest
*req
)
2373 NvmeRwCmd
*rw
= (NvmeRwCmd
*)&req
->cmd
;
2374 NvmeNamespace
*ns
= req
->ns
;
2375 BlockBackend
*blk
= ns
->blkconf
.blk
;
2376 uint64_t slba
= le64_to_cpu(rw
->slba
);
2377 uint32_t nlb
= le16_to_cpu(rw
->nlb
) + 1;
2378 size_t len
= nvme_l2b(ns
, nlb
);
2379 int64_t offset
= nvme_l2b(ns
, slba
);
2380 uint8_t prinfo
= NVME_RW_PRINFO(le16_to_cpu(rw
->control
));
2381 uint32_t reftag
= le32_to_cpu(rw
->reftag
);
2382 NvmeBounceContext
*ctx
= NULL
;
2385 trace_pci_nvme_verify(nvme_cid(req
), nvme_nsid(ns
), slba
, nlb
);
2387 if (NVME_ID_NS_DPS_TYPE(ns
->id_ns
.dps
)) {
2388 status
= nvme_check_prinfo(ns
, prinfo
, slba
, reftag
);
2393 if (prinfo
& NVME_PRINFO_PRACT
) {
2394 return NVME_INVALID_PROT_INFO
| NVME_DNR
;
2398 if (len
> n
->page_size
<< n
->params
.vsl
) {
2399 return NVME_INVALID_FIELD
| NVME_DNR
;
2402 status
= nvme_check_bounds(ns
, slba
, nlb
);
2407 if (NVME_ERR_REC_DULBE(ns
->features
.err_rec
)) {
2408 status
= nvme_check_dulbe(ns
, slba
, nlb
);
2414 ctx
= g_new0(NvmeBounceContext
, 1);
2417 ctx
->data
.bounce
= g_malloc(len
);
2419 qemu_iovec_init(&ctx
->data
.iov
, 1);
2420 qemu_iovec_add(&ctx
->data
.iov
, ctx
->data
.bounce
, len
);
2422 block_acct_start(blk_get_stats(blk
), &req
->acct
, ctx
->data
.iov
.size
,
2425 req
->aiocb
= blk_aio_preadv(ns
->blkconf
.blk
, offset
, &ctx
->data
.iov
, 0,
2426 nvme_verify_mdata_in_cb
, ctx
);
2427 return NVME_NO_COMPLETE
;
typedef struct NvmeCopyAIOCB {
    NvmeCopySourceRange *ranges;
        BlockAcctCookie read;
        BlockAcctCookie write;

static void nvme_copy_cancel(BlockAIOCB *aiocb)
    NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common);

    iocb->ret = -ECANCELED;

    blk_aio_cancel_async(iocb->aiocb);

static const AIOCBInfo nvme_copy_aiocb_info = {
    .aiocb_size   = sizeof(NvmeCopyAIOCB),
    .cancel_async = nvme_copy_cancel,

static void nvme_copy_bh(void *opaque)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    BlockAcctStats *stats = blk_get_stats(ns->blkconf.blk);

    if (iocb->idx != iocb->nr) {
        req->cqe.result = cpu_to_le32(iocb->idx);

    qemu_iovec_destroy(&iocb->iov);
    g_free(iocb->bounce);

    qemu_bh_delete(iocb->bh);

    if (iocb->ret < 0) {
        block_acct_failed(stats, &iocb->acct.read);
        block_acct_failed(stats, &iocb->acct.write);
        block_acct_done(stats, &iocb->acct.read);
        block_acct_done(stats, &iocb->acct.write);

    iocb->common.cb(iocb->common.opaque, iocb->ret);
    qemu_aio_unref(iocb);

static void nvme_copy_cb(void *opaque, int ret);

static void nvme_copy_out_completed_cb(void *opaque, int ret)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    NvmeCopySourceRange *range = &iocb->ranges[iocb->idx];
    uint32_t nlb = le32_to_cpu(range->nlb) + 1;

    } else if (iocb->ret < 0) {

    if (ns->params.zoned) {
        nvme_advance_zone_wp(ns, iocb->zone, nlb);

    nvme_copy_cb(iocb, iocb->ret);

static void nvme_copy_out_cb(void *opaque, int ret)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    NvmeCopySourceRange *range;

    } else if (iocb->ret < 0) {

    nvme_copy_out_completed_cb(iocb, 0);

    range = &iocb->ranges[iocb->idx];
    nlb = le32_to_cpu(range->nlb) + 1;

    mlen = nvme_m2b(ns, nlb);
    mbounce = iocb->bounce + nvme_l2b(ns, nlb);

    qemu_iovec_reset(&iocb->iov);
    qemu_iovec_add(&iocb->iov, mbounce, mlen);

    iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_moff(ns, iocb->slba),
                                  &iocb->iov, 0, nvme_copy_out_completed_cb,

    nvme_copy_cb(iocb, ret);

static void nvme_copy_in_completed_cb(void *opaque, int ret)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    NvmeCopySourceRange *range;

    } else if (iocb->ret < 0) {

    range = &iocb->ranges[iocb->idx];
    nlb = le32_to_cpu(range->nlb) + 1;
    len = nvme_l2b(ns, nlb);

    trace_pci_nvme_copy_out(iocb->slba, nlb);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;

        uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
        uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);

        uint16_t apptag = le16_to_cpu(range->apptag);
        uint16_t appmask = le16_to_cpu(range->appmask);
        uint32_t reftag = le32_to_cpu(range->reftag);

        uint64_t slba = le64_to_cpu(range->slba);
        size_t mlen = nvme_m2b(ns, nlb);
        uint8_t *mbounce = iocb->bounce + nvme_l2b(ns, nlb);

        status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen, prinfor,
                                slba, apptag, appmask, &reftag);

        apptag = le16_to_cpu(copy->apptag);
        appmask = le16_to_cpu(copy->appmask);

        if (prinfow & NVME_PRINFO_PRACT) {
            status = nvme_check_prinfo(ns, prinfow, iocb->slba, iocb->reftag);

            nvme_dif_pract_generate_dif(ns, iocb->bounce, len, mbounce, mlen,
                                        apptag, &iocb->reftag);

            status = nvme_dif_check(ns, iocb->bounce, len, mbounce, mlen,
                                    prinfow, iocb->slba, apptag, appmask,

    status = nvme_check_bounds(ns, iocb->slba, nlb);

    if (ns->params.zoned) {
        status = nvme_check_zone_write(ns, iocb->zone, iocb->slba, nlb);

        iocb->zone->w_ptr += nlb;

    qemu_iovec_reset(&iocb->iov);
    qemu_iovec_add(&iocb->iov, iocb->bounce, len);

    iocb->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, iocb->slba),
                                  &iocb->iov, 0, nvme_copy_out_cb, iocb);

    req->status = status;

    qemu_bh_schedule(iocb->bh);

    nvme_copy_cb(iocb, ret);

static void nvme_copy_in_cb(void *opaque, int ret)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    NvmeCopySourceRange *range;

    } else if (iocb->ret < 0) {

    nvme_copy_in_completed_cb(iocb, 0);

    range = &iocb->ranges[iocb->idx];
    slba = le64_to_cpu(range->slba);
    nlb = le32_to_cpu(range->nlb) + 1;

    qemu_iovec_reset(&iocb->iov);
    qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(ns, nlb),

    iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_moff(ns, slba),
                                 &iocb->iov, 0, nvme_copy_in_completed_cb,

    nvme_copy_cb(iocb, iocb->ret);

static void nvme_copy_cb(void *opaque, int ret)
    NvmeCopyAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;
    NvmeCopySourceRange *range;

    } else if (iocb->ret < 0) {

    if (iocb->idx == iocb->nr) {

    range = &iocb->ranges[iocb->idx];
    slba = le64_to_cpu(range->slba);
    nlb = le32_to_cpu(range->nlb) + 1;
    len = nvme_l2b(ns, nlb);

    trace_pci_nvme_copy_source_range(slba, nlb);

    if (nlb > le16_to_cpu(ns->id_ns.mssrl)) {
        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;

    status = nvme_check_bounds(ns, slba, nlb);

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);

    if (ns->params.zoned) {
        status = nvme_check_zone_read(ns, slba, nlb);

    qemu_iovec_reset(&iocb->iov);
    qemu_iovec_add(&iocb->iov, iocb->bounce, len);

    iocb->aiocb = blk_aio_preadv(ns->blkconf.blk, nvme_l2b(ns, slba),
                                 &iocb->iov, 0, nvme_copy_in_cb, iocb);

    req->status = status;

    qemu_bh_schedule(iocb->bh);

static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
    NvmeNamespace *ns = req->ns;
    NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
    NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk,
    uint16_t nr = copy->nr + 1;
    uint8_t format = copy->control[0] & 0xf;
    uint16_t prinfor = ((copy->control[0] >> 4) & 0xf);
    uint16_t prinfow = ((copy->control[2] >> 2) & 0xf);

    trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);

    iocb->ranges = NULL;

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
        ((prinfor & NVME_PRINFO_PRACT) != (prinfow & NVME_PRINFO_PRACT))) {
        status = NVME_INVALID_FIELD | NVME_DNR;

    if (!(n->id_ctrl.ocfs & (1 << format))) {
        trace_pci_nvme_err_copy_invalid_format(format);
        status = NVME_INVALID_FIELD | NVME_DNR;

    if (nr > ns->id_ns.msrc + 1) {
        status = NVME_CMD_SIZE_LIMIT | NVME_DNR;

    iocb->ranges = g_new(NvmeCopySourceRange, nr);

    status = nvme_h2c(n, (uint8_t *)iocb->ranges,
                      sizeof(NvmeCopySourceRange) * nr, req);

    iocb->slba = le64_to_cpu(copy->sdlba);

    if (ns->params.zoned) {
        iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba);
            status = NVME_LBA_RANGE | NVME_DNR;

        status = nvme_zrm_auto(n, ns, iocb->zone);

    iocb->bh = qemu_bh_new(nvme_copy_bh, iocb);

    iocb->reftag = le32_to_cpu(copy->reftag);
    iocb->bounce = g_malloc_n(le16_to_cpu(ns->id_ns.mssrl),
                              ns->lbasz + ns->lbaf.ms);

    qemu_iovec_init(&iocb->iov, 1);

    block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.read, 0,
    block_acct_start(blk_get_stats(ns->blkconf.blk), &iocb->acct.write, 0,

    req->aiocb = &iocb->common;
    nvme_copy_cb(iocb, 0);

    return NVME_NO_COMPLETE;

    g_free(iocb->ranges);
    qemu_aio_unref(iocb);
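
/*
 * Compare reads the addressed blocks into a bounce buffer first; the actual
 * comparison against the host-supplied buffer is done in the
 * nvme_compare_data_cb completion callback.
 */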
static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    size_t data_len = nvme_l2b(ns, nlb);
    size_t len = data_len;
    int64_t offset = nvme_l2b(ns, slba);
    struct nvme_compare_ctx *ctx = NULL;

    trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (prinfo & NVME_PRINFO_PRACT)) {
        return NVME_INVALID_PROT_INFO | NVME_DNR;

    if (nvme_ns_ext(ns)) {
        len += nvme_m2b(ns, nlb);

    status = nvme_check_mdts(n, len);

    status = nvme_check_bounds(ns, slba, nlb);

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);

    status = nvme_map_dptr(n, &req->sg, len, &req->cmd);

    ctx = g_new(struct nvme_compare_ctx, 1);
    ctx->data.bounce = g_malloc(data_len);

    qemu_iovec_init(&ctx->data.iov, 1);
    qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len);

    block_acct_start(blk_get_stats(blk), &req->acct, data_len,

    req->aiocb = blk_aio_preadv(blk, offset, &ctx->data.iov, 0,
                                nvme_compare_data_cb, req);

    return NVME_NO_COMPLETE;
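
/*
 * Flush accepts the broadcast NSID (FFFFFFFFh); in that case the AIOCB
 * below walks all active namespaces and issues one blk_aio_flush() per
 * namespace from nvme_flush_ns_cb()/nvme_flush_bh().
 */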
typedef struct NvmeFlushAIOCB {

static void nvme_flush_cancel(BlockAIOCB *acb)
    NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common);

    iocb->ret = -ECANCELED;

    blk_aio_cancel_async(iocb->aiocb);

static const AIOCBInfo nvme_flush_aiocb_info = {
    .aiocb_size = sizeof(NvmeFlushAIOCB),
    .cancel_async = nvme_flush_cancel,
    .get_aio_context = nvme_get_aio_context,

static void nvme_flush_ns_cb(void *opaque, int ret)
    NvmeFlushAIOCB *iocb = opaque;
    NvmeNamespace *ns = iocb->ns;

    } else if (iocb->ret < 0) {

    trace_pci_nvme_flush_ns(iocb->nsid);

    iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb);

    qemu_bh_schedule(iocb->bh);

static void nvme_flush_bh(void *opaque)
    NvmeFlushAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeCtrl *n = nvme_ctrl(req);

    if (iocb->ret < 0) {

    if (iocb->broadcast) {
        for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
            iocb->ns = nvme_ns(n, i);

    nvme_flush_ns_cb(iocb, 0);

    qemu_bh_delete(iocb->bh);

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_aio_unref(iocb);

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
    NvmeFlushAIOCB *iocb;
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req);

    iocb->bh = qemu_bh_new(nvme_flush_bh, iocb);

    iocb->broadcast = (nsid == NVME_NSID_BROADCAST);

    if (!iocb->broadcast) {
        if (!nvme_nsid_valid(n, nsid)) {
            status = NVME_INVALID_NSID | NVME_DNR;

        iocb->ns = nvme_ns(n, nsid);
            status = NVME_INVALID_FIELD | NVME_DNR;

    req->aiocb = &iocb->common;
    qemu_bh_schedule(iocb->bh);

    return NVME_NO_COMPLETE;

    qemu_bh_delete(iocb->bh);

    qemu_aio_unref(iocb);
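
/*
 * Read path: MDTS, LBA bounds, zone state (for zoned namespaces) and DULBE
 * are validated before the transfer is mapped and handed to the block
 * backend; namespaces formatted with protection information take the
 * nvme_dif_rw() path instead.
 */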
static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t mapped_size = data_size;
    uint64_t data_offset;
    BlockBackend *blk = ns->blkconf.blk;

    if (nvme_ns_ext(ns)) {
        mapped_size += nvme_m2b(ns, nlb);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            bool pract = prinfo & NVME_PRINFO_PRACT;

            if (pract && ns->lbaf.ms == 8) {
                mapped_size = data_size;

    trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba);

    status = nvme_check_mdts(n, mapped_size);

    status = nvme_check_bounds(ns, slba, nlb);

    if (ns->params.zoned) {
        status = nvme_check_zone_read(ns, slba, nlb);
            trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);

    if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
        status = nvme_check_dulbe(ns, slba, nlb);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        return nvme_dif_rw(n, req);

    status = nvme_map_data(n, nlb, req);

    data_offset = nvme_l2b(ns, slba);

    block_acct_start(blk_get_stats(blk), &req->acct, data_size,
    nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;

    block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
    return status | NVME_DNR;
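
/*
 * nvme_do_write() backs Write, Write Zeroes and Zone Append (see the
 * wrappers further down); the 'append' flag selects the zone-append slba
 * handling and the trailing flag selects the write-zeroes (no data
 * transfer) path.
 */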
static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint16_t ctrl = le16_to_cpu(rw->control);
    uint8_t prinfo = NVME_RW_PRINFO(ctrl);
    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t mapped_size = data_size;
    uint64_t data_offset;
    NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
    BlockBackend *blk = ns->blkconf.blk;

    if (nvme_ns_ext(ns)) {
        mapped_size += nvme_m2b(ns, nlb);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            bool pract = prinfo & NVME_PRINFO_PRACT;

            if (pract && ns->lbaf.ms == 8) {
                mapped_size -= nvme_m2b(ns, nlb);

    trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
                         nvme_nsid(ns), nlb, mapped_size, slba);

    status = nvme_check_mdts(n, mapped_size);

    status = nvme_check_bounds(ns, slba, nlb);

    if (ns->params.zoned) {
        zone = nvme_get_zone_by_slba(ns, slba);

            bool piremap = !!(ctrl & NVME_RW_PIREMAP);

            if (unlikely(slba != zone->d.zslba)) {
                trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba);
                status = NVME_INVALID_FIELD;

            if (n->params.zasl &&
                data_size > (uint64_t)n->page_size << n->params.zasl) {
                trace_pci_nvme_err_zasl(data_size);
                return NVME_INVALID_FIELD | NVME_DNR;

            rw->slba = cpu_to_le64(slba);
            res->slba = cpu_to_le64(slba);

            switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
            case NVME_ID_NS_DPS_TYPE_1:
                return NVME_INVALID_PROT_INFO | NVME_DNR;

            case NVME_ID_NS_DPS_TYPE_2:
                uint32_t reftag = le32_to_cpu(rw->reftag);
                rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba));

            case NVME_ID_NS_DPS_TYPE_3:
                return NVME_INVALID_PROT_INFO | NVME_DNR;

        status = nvme_check_zone_write(ns, zone, slba, nlb);

        status = nvme_zrm_auto(n, ns, zone);

    data_offset = nvme_l2b(ns, slba);

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        return nvme_dif_rw(n, req);

    status = nvme_map_data(n, nlb, req);

    block_acct_start(blk_get_stats(blk), &req->acct, data_size,
    nvme_blk_write(blk, data_offset, nvme_rw_cb, req);

    req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb,

    return NVME_NO_COMPLETE;

    block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
    return status | NVME_DNR;

static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
    return nvme_do_write(n, req, false, false);

static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
    return nvme_do_write(n, req, false, true);

static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req)
    return nvme_do_write(n, req, true, false);

static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
                                            uint64_t *slba, uint32_t *zone_idx)
    uint32_t dw10 = le32_to_cpu(c->cdw10);
    uint32_t dw11 = le32_to_cpu(c->cdw11);

    if (!ns->params.zoned) {
        trace_pci_nvme_err_invalid_opc(c->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;

    *slba = ((uint64_t)dw11) << 32 | dw10;
    if (unlikely(*slba >= ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;

    *zone_idx = nvme_zone_idx(ns, *slba);
    assert(*zone_idx < ns->num_zones);

    return NVME_SUCCESS;

typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,

enum NvmeZoneProcessingMask {
    NVME_PROC_CURRENT_ZONE    = 0,
    NVME_PROC_OPENED_ZONES    = 1 << 0,
    NVME_PROC_CLOSED_ZONES    = 1 << 1,
    NVME_PROC_READ_ONLY_ZONES = 1 << 2,
    NVME_PROC_FULL_ZONES      = 1 << 3,

static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
                               NvmeZoneState state, NvmeRequest *req)
    return nvme_zrm_open(nvme_ctrl(req), ns, zone);

static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
    return nvme_zrm_close(ns, zone);

static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
                                 NvmeZoneState state, NvmeRequest *req)
    return nvme_zrm_finish(ns, zone);

static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
                                  NvmeZoneState state, NvmeRequest *req)
    case NVME_ZONE_STATE_READ_ONLY:
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
    case NVME_ZONE_STATE_OFFLINE:
        return NVME_SUCCESS;
        return NVME_ZONE_INVAL_TRANSITION;

static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone)
    uint8_t state = nvme_get_zone_state(zone);

    if (state == NVME_ZONE_STATE_EMPTY) {
        status = nvme_aor_check(ns, 1, 0);

        nvme_aor_inc_active(ns);
        zone->d.za |= NVME_ZA_ZD_EXT_VALID;
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
        return NVME_SUCCESS;

    return NVME_ZONE_INVAL_TRANSITION;

static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone,
                                    enum NvmeZoneProcessingMask proc_mask,
                                    op_handler_t op_hndlr, NvmeRequest *req)
    uint16_t status = NVME_SUCCESS;
    NvmeZoneState zs = nvme_get_zone_state(zone);

    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        proc_zone = proc_mask & NVME_PROC_OPENED_ZONES;
    case NVME_ZONE_STATE_CLOSED:
        proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES;
    case NVME_ZONE_STATE_READ_ONLY:
        proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES;
    case NVME_ZONE_STATE_FULL:
        proc_zone = proc_mask & NVME_PROC_FULL_ZONES;

    status = op_hndlr(ns, zone, zs, req);
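
/*
 * For zone management commands, nvme_do_zone_op() either applies the
 * handler to the single addressed zone or, when a processing mask is set
 * ("Select All"), to every zone list selected by proc_mask (closed,
 * implicitly/explicitly open, full and, for offline, read-only zones).
 */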
static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
                                enum NvmeZoneProcessingMask proc_mask,
                                op_handler_t op_hndlr, NvmeRequest *req)
    uint16_t status = NVME_SUCCESS;

    status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req);

    if (proc_mask & NVME_PROC_CLOSED_ZONES) {
        QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
            status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
            if (status && status != NVME_NO_COMPLETE) {

    if (proc_mask & NVME_PROC_OPENED_ZONES) {
        QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
            status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
            if (status && status != NVME_NO_COMPLETE) {

        QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
            status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
            if (status && status != NVME_NO_COMPLETE) {

    if (proc_mask & NVME_PROC_FULL_ZONES) {
        QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) {
            status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
            if (status && status != NVME_NO_COMPLETE) {

    if (proc_mask & NVME_PROC_READ_ONLY_ZONES) {
        for (i = 0; i < ns->num_zones; i++, zone++) {
            status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
            if (status && status != NVME_NO_COMPLETE) {

typedef struct NvmeZoneResetAIOCB {
} NvmeZoneResetAIOCB;

static void nvme_zone_reset_cancel(BlockAIOCB *aiocb)
    NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common);
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;

    iocb->idx = ns->num_zones;

    iocb->ret = -ECANCELED;

    blk_aio_cancel_async(iocb->aiocb);

static const AIOCBInfo nvme_zone_reset_aiocb_info = {
    .aiocb_size = sizeof(NvmeZoneResetAIOCB),
    .cancel_async = nvme_zone_reset_cancel,

static void nvme_zone_reset_bh(void *opaque)
    NvmeZoneResetAIOCB *iocb = opaque;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);

    qemu_aio_unref(iocb);

static void nvme_zone_reset_cb(void *opaque, int ret);

static void nvme_zone_reset_epilogue_cb(void *opaque, int ret)
    NvmeZoneResetAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;

        nvme_zone_reset_cb(iocb, ret);

        nvme_zone_reset_cb(iocb, 0);

    moff = nvme_moff(ns, iocb->zone->d.zslba);
    count = nvme_m2b(ns, ns->zone_size);

    iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count,
                                        nvme_zone_reset_cb, iocb);

static void nvme_zone_reset_cb(void *opaque, int ret)
    NvmeZoneResetAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = req->ns;

        nvme_zrm_reset(ns, iocb->zone);

    while (iocb->idx < ns->num_zones) {
        NvmeZone *zone = &ns->zone_array[iocb->idx++];

        switch (nvme_get_zone_state(zone)) {
        case NVME_ZONE_STATE_EMPTY:
        case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        case NVME_ZONE_STATE_CLOSED:
        case NVME_ZONE_STATE_FULL:

        trace_pci_nvme_zns_zone_reset(zone->d.zslba);

        iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk,
                                            nvme_l2b(ns, zone->d.zslba),
                                            nvme_l2b(ns, ns->zone_size),
                                            nvme_zone_reset_epilogue_cb,

    qemu_bh_schedule(iocb->bh);

static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    NvmeZoneResetAIOCB *iocb;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint32_t zone_idx = 0;
    enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE;

    action = dw13 & 0xff;
    all = !!(dw13 & 0x100);

    req->status = NVME_SUCCESS;

    status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);

    zone = &ns->zone_array[zone_idx];
    if (slba != zone->d.zslba) {
        trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba);
        return NVME_INVALID_FIELD | NVME_DNR;

    case NVME_ZONE_ACTION_OPEN:
            proc_mask = NVME_PROC_CLOSED_ZONES;
        trace_pci_nvme_open_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req);

    case NVME_ZONE_ACTION_CLOSE:
            proc_mask = NVME_PROC_OPENED_ZONES;
        trace_pci_nvme_close_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req);

    case NVME_ZONE_ACTION_FINISH:
            proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES;
        trace_pci_nvme_finish_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req);

    case NVME_ZONE_ACTION_RESET:
        trace_pci_nvme_reset_zone(slba, zone_idx, all);

        iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk,

        iocb->bh = qemu_bh_new(nvme_zone_reset_bh, iocb);
        iocb->idx = zone_idx;

        req->aiocb = &iocb->common;
        nvme_zone_reset_cb(iocb, 0);

        return NVME_NO_COMPLETE;

    case NVME_ZONE_ACTION_OFFLINE:
            proc_mask = NVME_PROC_READ_ONLY_ZONES;
        trace_pci_nvme_offline_zone(slba, zone_idx, all);
        status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req);

    case NVME_ZONE_ACTION_SET_ZD_EXT:
        trace_pci_nvme_set_descriptor_extension(slba, zone_idx);
        if (all || !ns->params.zd_extension_size) {
            return NVME_INVALID_FIELD | NVME_DNR;
        zd_ext = nvme_get_zd_extension(ns, zone_idx);
        status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
            trace_pci_nvme_err_zd_extension_map_error(zone_idx);

        status = nvme_set_zd_ext(ns, zone);
        if (status == NVME_SUCCESS) {
            trace_pci_nvme_zd_extension_set(zone_idx);

        trace_pci_nvme_err_invalid_mgmt_action(action);
        status = NVME_INVALID_FIELD;

    if (status == NVME_ZONE_INVAL_TRANSITION) {
        trace_pci_nvme_err_invalid_zone_state_transition(action, slba,

static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
    NvmeZoneState zs = nvme_get_zone_state(zl);

    case NVME_ZONE_REPORT_ALL:
    case NVME_ZONE_REPORT_EMPTY:
        return zs == NVME_ZONE_STATE_EMPTY;
    case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
    case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
        return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
    case NVME_ZONE_REPORT_CLOSED:
        return zs == NVME_ZONE_STATE_CLOSED;
    case NVME_ZONE_REPORT_FULL:
        return zs == NVME_ZONE_STATE_FULL;
    case NVME_ZONE_REPORT_READ_ONLY:
        return zs == NVME_ZONE_STATE_READ_ONLY;
    case NVME_ZONE_REPORT_OFFLINE:
        return zs == NVME_ZONE_STATE_OFFLINE;

static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
    NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    /* cdw12 is zero-based number of dwords to return. Convert to bytes */
    uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint32_t zone_idx, zra, zrasf, partial;
    uint64_t max_zones, nr_zones = 0;
    NvmeZoneReportHeader *header;
    size_t zone_entry_sz;

    req->status = NVME_SUCCESS;

    status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);

    if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) {
        return NVME_INVALID_FIELD | NVME_DNR;

    if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) {
        return NVME_INVALID_FIELD | NVME_DNR;

    zrasf = (dw13 >> 8) & 0xff;
    if (zrasf > NVME_ZONE_REPORT_OFFLINE) {
        return NVME_INVALID_FIELD | NVME_DNR;

    if (data_size < sizeof(NvmeZoneReportHeader)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    status = nvme_check_mdts(n, data_size);

    partial = (dw13 >> 16) & 0x01;

    zone_entry_sz = sizeof(NvmeZoneDescr);
    if (zra == NVME_ZONE_REPORT_EXTENDED) {
        zone_entry_sz += ns->params.zd_extension_size;

    max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz;
    buf = g_malloc0(data_size);

    zone = &ns->zone_array[zone_idx];
    for (i = zone_idx; i < ns->num_zones; i++) {
        if (partial && nr_zones >= max_zones) {
        if (nvme_zone_matches_filter(zrasf, zone++)) {

    header = (NvmeZoneReportHeader *)buf;
    header->nr_zones = cpu_to_le64(nr_zones);

    buf_p = buf + sizeof(NvmeZoneReportHeader);
    for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
        zone = &ns->zone_array[zone_idx];
        if (nvme_zone_matches_filter(zrasf, zone)) {
            z = (NvmeZoneDescr *)buf_p;
            buf_p += sizeof(NvmeZoneDescr);

            z->zcap = cpu_to_le64(zone->d.zcap);
            z->zslba = cpu_to_le64(zone->d.zslba);

            if (nvme_wp_is_valid(zone)) {
                z->wp = cpu_to_le64(zone->d.wp);
                z->wp = cpu_to_le64(~0ULL);

            if (zra == NVME_ZONE_REPORT_EXTENDED) {
                if (zone->d.za & NVME_ZA_ZD_EXT_VALID) {
                    memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx),
                           ns->params.zd_extension_size);
                buf_p += ns->params.zd_extension_size;

    status = nvme_c2h(n, (uint8_t *)buf, data_size, req);

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;

    /*
     * In the base NVM command set, Flush may apply to all namespaces
     * (indicated by NSID being set to FFFFFFFFh). But if that feature is used
     * along with TP 4056 (Namespace Types), it may be pretty screwed up.
     *
     * If NSID is indeed set to FFFFFFFFh, we simply cannot associate the
     * opcode with a specific command since we cannot determine a unique I/O
     * command set. Opcode 0h could have any other meaning than something
     * equivalent to flushing and say it DOES have completely different
     * semantics in some other command set - does an NSID of FFFFFFFFh then
     * mean "for all namespaces, apply whatever command set specific command
     * that uses the 0h opcode?" Or does it mean "for all namespaces, apply
     * whatever command that uses the 0h opcode if, and only if, it allows NSID
     *
     * Anyway (and luckily), for now, we do not care about this since the
     * device only supports namespace types that includes the NVM Flush command
     * (NVM and Zoned), so always do an NVM Flush.
     */
    if (req->cmd.opcode == NVME_CMD_FLUSH) {
        return nvme_flush(n, req);

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    if (!(ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;

    switch (req->cmd.opcode) {
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_ZONE_APPEND:
        return nvme_zone_append(n, req);
    case NVME_CMD_WRITE:
        return nvme_write(n, req);
        return nvme_read(n, req);
    case NVME_CMD_COMPARE:
        return nvme_compare(n, req);
        return nvme_dsm(n, req);
    case NVME_CMD_VERIFY:
        return nvme_verify(n, req);
        return nvme_copy(n, req);
    case NVME_CMD_ZONE_MGMT_SEND:
        return nvme_zone_mgmt_send(n, req);
    case NVME_CMD_ZONE_MGMT_RECV:
        return nvme_zone_mgmt_recv(n, req);

    return NVME_INVALID_OPCODE | NVME_DNR;

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
    n->sq[sq->sqid] = NULL;
    timer_free(sq->timer);

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;

    trace_pci_nvme_del_sq(qid);

    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        blk_aio_cancel(r->aiocb);

    assert(QTAILQ_EMPTY(&sq->out_req_list));

    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            QTAILQ_REMOVE(&cq->req_list, r, entry);
            QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
    sq->dma_addr = dma_addr;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
                 n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
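
/*
 * SMART / Health accounting: nvme_set_blk_stats() accumulates the block
 * backend byte counters as 512-byte units; the Data Units Read/Written
 * fields of the SMART log are reported in thousands of such units, which
 * the DIV_ROUND_UP conversion in nvme_smart_info() accounts for.
 */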
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];

static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
            return NVME_INVALID_NSID | NVME_DNR;
        nvme_set_blk_stats(ns, &stats);

        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            nvme_set_blk_stats(ns, &stats);

    trans_len = MIN(sizeof(smart) - off, buf_len);
    smart.critical_warning = n->smart_critical_warning;

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    nvme_clear_events(n, NVME_AER_TYPE_SMART);

    return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);

static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
    NvmeFwSlotInfoLog fw_log = {

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    nvme_clear_events(n, NVME_AER_TYPE_ERROR);

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);

static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                    uint64_t off, NvmeRequest *req)
    uint32_t nslist[1024];

    memset(nslist, 0x0, sizeof(nslist));
    trans_len = MIN(sizeof(nslist) - off, buf_len);

    while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
           NVME_CHANGED_NSID_SIZE) {
        /*
         * If more than 1024 namespaces, the first entry in the log page should
         * be set to FFFFFFFFh and the others to 0 as spec.
         */
        if (i == ARRAY_SIZE(nslist)) {
            memset(nslist, 0x0, sizeof(nslist));
            nslist[0] = 0xffffffff;

        clear_bit(nsid, n->changed_nsids);

    /*
     * Remove all the remaining list entries in case returns directly due to
     * more than 1024 namespaces.
     */
    if (nslist[0] == 0xffffffff) {
        bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE);

    nvme_clear_events(n, NVME_AER_TYPE_NOTICE);

    return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req);

static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
                                 uint64_t off, NvmeRequest *req)
    NvmeEffectsLog log = {};
    const uint32_t *src_iocs = NULL;

    if (off >= sizeof(log)) {
        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
        return NVME_INVALID_FIELD | NVME_DNR;

    switch (NVME_CC_CSS(n->bar.cc)) {
    case NVME_CC_CSS_NVM:
        src_iocs = nvme_cse_iocs_nvm;
    case NVME_CC_CSS_ADMIN_ONLY:
    case NVME_CC_CSS_CSI:
            src_iocs = nvme_cse_iocs_nvm;
        case NVME_CSI_ZONED:
            src_iocs = nvme_cse_iocs_zoned;

    memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));

        memcpy(log.iocs, src_iocs, sizeof(log.iocs));

    trans_len = MIN(sizeof(log) - off, buf_len);

    return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t lid = dw10 & 0xff;
    uint8_t lsp = (dw10 >> 8) & 0xf;
    uint8_t rae = (dw10 >> 15) & 0x1;
    uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

        return NVME_INVALID_FIELD | NVME_DNR;

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);

    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    case NVME_LOG_CHANGED_NSLIST:
        return nvme_changed_nslist(n, rae, len, off, req);
    case NVME_LOG_CMD_EFFECTS:
        return nvme_cmd_effects(n, csi, len, off, req);
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
    n->cq[cq->cqid] = NULL;
    timer_free(cq->timer);
    if (msix_enabled(&n->parent_obj)) {
        msix_vector_unuse(&n->parent_obj, cq->vector);

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;

    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;

    if (cq->irq_enabled && cq->tail != cq->head) {

    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
    if (msix_enabled(&n->parent_obj)) {
        ret = msix_vector_use(&n->parent_obj, vector);

    cq->dma_addr = dma_addr;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
                 n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     */
    n->qs_created = true;
    return NVME_SUCCESS;

static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};

    return nvme_c2h(n, id, sizeof(id), req);

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
    trace_pci_nvme_identify_ctrl();

    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);

static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
    NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;

    trace_pci_nvme_identify_ctrl_csi(c->csi);

        id_nvm->vsl = n->params.vsl;
        id_nvm->dmrsl = cpu_to_le32(n->dmrsl);

    case NVME_CSI_ZONED:
        ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;

        return NVME_INVALID_FIELD | NVME_DNR;

    return nvme_c2h(n, id, sizeof(id), req);

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        ns = nvme_subsys_ns(n->subsys, nsid);
            return nvme_rpt_empty_id_struct(n, req);
            return nvme_rpt_empty_id_struct(n, req);

    if (active || ns->csi == NVME_CSI_NVM) {
        return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);

    return NVME_INVALID_CMD_SET | NVME_DNR;

static uint16_t nvme_identify_ctrl_list(NvmeCtrl *n, NvmeRequest *req,
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint16_t min_id = le16_to_cpu(c->ctrlid);
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint16_t *ids = &list[1];
    int cntlid, nr_ids = 0;

    trace_pci_nvme_identify_ctrl_list(c->cns, min_id);

        return NVME_INVALID_FIELD | NVME_DNR;

        if (nsid == NVME_NSID_BROADCAST) {
            return NVME_INVALID_FIELD | NVME_DNR;

        ns = nvme_subsys_ns(n->subsys, nsid);
            return NVME_INVALID_FIELD | NVME_DNR;

    for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
        ctrl = nvme_subsys_ctrl(n->subsys, cntlid);

        if (attached && !nvme_ns(ctrl, nsid)) {

        ids[nr_ids++] = cntlid;

    return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);

static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns_csi(nsid, c->csi);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        ns = nvme_subsys_ns(n->subsys, nsid);
            return nvme_rpt_empty_id_struct(n, req);
            return nvme_rpt_empty_id_struct(n, req);

    if (c->csi == NVME_CSI_NVM) {
        return nvme_rpt_empty_id_struct(n, req);
    } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
        return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),

    return NVME_INVALID_FIELD | NVME_DNR;

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both FFFFFFFFh (NVME_NSID_BROADCAST) and FFFFFFFFEh are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_subsys_ns(n->subsys, i);

        if (ns->params.nsid <= min_nsid) {

        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {

    return nvme_c2h(n, list, data_len, req);

static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);
    uint32_t *list_ptr = (uint32_t *)list;

    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);

    /*
     * Same as in nvme_identify_nslist(), FFFFFFFFh/FFFFFFFFEh are invalid.
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;

    if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
        return NVME_INVALID_FIELD | NVME_DNR;

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_subsys_ns(n->subsys, i);

        if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {

        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {

    return nvme_c2h(n, list, data_len, req);

static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    uint8_t *pos = list;
        uint8_t v[NVME_NIDL_UUID];
    } QEMU_PACKED eui64;

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    /*
     * If the EUI-64 field is 0 and the NGUID field is 0, the namespace must
     * provide a valid Namespace UUID in the Namespace Identification Descriptor
     * data structure. QEMU does not yet support setting NGUID.
     */
    uuid.hdr.nidt = NVME_NIDT_UUID;
    uuid.hdr.nidl = NVME_NIDL_UUID;
    memcpy(uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
    memcpy(pos, &uuid, sizeof(uuid));
    pos += sizeof(uuid);

    if (ns->params.eui64) {
        eui64.hdr.nidt = NVME_NIDT_EUI64;
        eui64.hdr.nidl = NVME_NIDL_EUI64;
        eui64.v = cpu_to_be64(ns->params.eui64);
        memcpy(pos, &eui64, sizeof(eui64));
        pos += sizeof(eui64);

    csi.hdr.nidt = NVME_NIDT_CSI;
    csi.hdr.nidl = NVME_NIDL_CSI;
    memcpy(pos, &csi, sizeof(csi));

    return nvme_c2h(n, list, sizeof(list), req);

static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
    static const int data_len = sizeof(list);

    trace_pci_nvme_identify_cmd_set();

    NVME_SET_CSI(*list, NVME_CSI_NVM);
    NVME_SET_CSI(*list, NVME_CSI_ZONED);

    return nvme_c2h(n, list, data_len, req);

static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),

    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req, true);
    case NVME_ID_CNS_NS_PRESENT:
        return nvme_identify_ns(n, req, false);
    case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
        return nvme_identify_ctrl_list(n, req, true);
    case NVME_ID_CNS_CTRL_LIST:
        return nvme_identify_ctrl_list(n, req, false);
    case NVME_ID_CNS_CS_NS:
        return nvme_identify_ns_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT:
        return nvme_identify_ns_csi(n, req, false);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_CS_CTRL:
        return nvme_identify_ctrl_csi(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req, true);
    case NVME_ID_CNS_NS_PRESENT_LIST:
        return nvme_identify_nslist(n, req, false);
    case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
        return nvme_identify_nslist_csi(n, req, true);
    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
        return nvme_identify_nslist_csi(n, req, false);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    case NVME_ID_CNS_IO_COMMAND_SET:
        return nvme_identify_cmd_set(n, req);
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;

static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;

    return NVME_SUCCESS;

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
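
/*
 * The Timestamp feature records the host-supplied value together with the
 * QEMU virtual clock at the time it was set; nvme_get_timestamp() returns
 * that value plus the elapsed time, packed into the 48-bit timestamp field,
 * with the origin bit indicating whether a host timestamp was ever set.
 */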
static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        uint64_t timestamp:48;

    union nvme_timestamp ts;

    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is FFFFFFFFh. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;

    case NVME_GETFEAT_SELECT_CURRENT:
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];

    case NVME_TEMPERATURE_THRESHOLD:
        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_ERROR_RECOVERY:
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;

        ns = nvme_ns(n, nsid);
        if (unlikely(!ns)) {
            return NVME_INVALID_FIELD | NVME_DNR;

        result = ns->features.err_rec;
    case NVME_VOLATILE_WRITE_CACHE:
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            result = blk_enable_write_cache(ns->blkconf.blk);

        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);

    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;

    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
                 ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
    case NVME_INTERRUPT_VECTOR_CONF:
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;

        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;

        result = nvme_feature_default[fid];

    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
4972 static uint16_t nvme_set_feature_timestamp(NvmeCtrl
*n
, NvmeRequest
*req
)
4977 ret
= nvme_h2c(n
, (uint8_t *)×tamp
, sizeof(timestamp
), req
);
4982 nvme_set_timestamp(n
, timestamp
);
4984 return NVME_SUCCESS
;
static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns = NULL;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);
    int i;

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if ((n->temperature >= n->features.temp_thresh_hi) ||
            (n->temperature <= n->features.temp_thresh_low)) {
            nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH);
        }

        break;
    case NVME_ERROR_RECOVERY:
        if (nsid == NVME_NSID_BROADCAST) {
            for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
                ns = nvme_ns(n, i);

                if (!ns) {
                    continue;
                }

                if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
                    ns->features.err_rec = dw11;
                }
            }

            break;
        }

        assert(ns);
        if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
            ns->features.err_rec = dw11;
        }
        break;
    case NVME_VOLATILE_WRITE_CACHE:
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: FFFFh is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1,
                                    ((dw11 >> 16) & 0xffff) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    case NVME_COMMAND_SET_PROFILE:
        if (dw11 & 0x1ff) {
            trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
            return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
        }
        break;
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }
    return NVME_SUCCESS;
}
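
/*
 * For Set Features/Number of Queues, nvme_set_feature() does not honor the
 * requested NSQR/NCQR beyond validating them; it always grants
 * max_ioqpairs - 1 (0's based) in the completion dword. For example, a host
 * writing dw11 = 0x00070007 (requesting 8 + 8 queues) against the default
 * max_ioqpairs=64 still reads back 0x003f003f.
 */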
static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}
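
/*
 * AERL is 0's based, so with the default aerl=3 up to four Asynchronous Event
 * Request commands may be outstanding at once; a fifth is completed
 * immediately with AER Limit Exceeded. Events that arrive while no AER is
 * outstanding are queued and delivered by nvme_process_aers() as soon as a
 * new AER is posted.
 */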
static void nvme_update_dmrsl(NvmeCtrl *n)
{
    int nsid;

    for (nsid = 1; nsid <= NVME_MAX_NAMESPACES; nsid++) {
        NvmeNamespace *ns = nvme_ns(n, nsid);
        if (!ns) {
            continue;
        }

        n->dmrsl = MIN_NON_ZERO(n->dmrsl,
                                BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
    }
}
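
/*
 * DMRSL (Dataset Management Range Size Limit) is recomputed from the
 * remaining attached namespaces so that a single DSM range never exceeds
 * BDRV_REQUEST_MAX_BYTES; e.g. a namespace formatted with 512-byte LBAs
 * limits DMRSL to roughly 4M blocks (about 2 GiB) per range.
 */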
static void nvme_select_iocs_ns(NvmeCtrl *n, NvmeNamespace *ns)
{
    ns->iocs = nvme_cse_iocs_none;
    switch (ns->csi) {
    case NVME_CSI_NVM:
        if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    case NVME_CSI_ZONED:
        if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
            ns->iocs = nvme_cse_iocs_zoned;
        } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
            ns->iocs = nvme_cse_iocs_nvm;
        }
        break;
    }
}
static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeCtrl *ctrl;
    uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    bool attach = !(dw10 & 0xf);
    uint16_t *nr_ids = &list[0];
    uint16_t *ids = &list[1];
    uint16_t ret;
    int i;

    trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_subsys_ns(n->subsys, nsid);
    if (!ns) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
    if (ret) {
        return ret;
    }

    if (!*nr_ids) {
        return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
    }

    *nr_ids = MIN(*nr_ids, NVME_CONTROLLER_LIST_SIZE - 1);
    for (i = 0; i < *nr_ids; i++) {
        ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
        if (!ctrl) {
            return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
        }

        if (attach) {
            if (nvme_ns(ctrl, nsid)) {
                return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
            }

            if (ns->attached && !ns->params.shared) {
                return NVME_NS_PRIVATE | NVME_DNR;
            }

            nvme_attach_ns(ctrl, ns);
            nvme_select_iocs_ns(ctrl, ns);
        } else {
            if (!nvme_ns(ctrl, nsid)) {
                return NVME_NS_NOT_ATTACHED | NVME_DNR;
            }

            ctrl->namespaces[nsid] = NULL;
            ns->attached--;

            nvme_update_dmrsl(ctrl);
        }

        /*
         * Add namespace id to the changed namespace id list for event clearing
         * via Get Log Page command.
         */
        if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
            nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
                               NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
                               NVME_LOG_CHANGED_NSLIST);
        }
    }

    return NVME_SUCCESS;
}
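
/*
 * The host-supplied buffer in nvme_ns_attachment() is a Controller List: a
 * 16-bit count in the first entry (nr_ids) followed by up to
 * NVME_CONTROLLER_LIST_SIZE - 1 CNTLIDs (ids). Each listed controller in the
 * shared subsystem has the namespace attached or detached, and a Namespace
 * Attribute Changed event is raised for controllers that have not already
 * reported it.
 */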
typedef struct NvmeFormatAIOCB {
    BlockAIOCB common;
    BlockAIOCB *aiocb;
    QEMUBH *bh;
    NvmeRequest *req;
    int ret;

    NvmeNamespace *ns;
    uint32_t nsid;
    bool broadcast;
    int64_t offset;
} NvmeFormatAIOCB;

static void nvme_format_bh(void *opaque);

static void nvme_format_cancel(BlockAIOCB *aiocb)
{
    NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common);

    blk_aio_cancel_async(iocb->aiocb);
}

static const AIOCBInfo nvme_format_aiocb_info = {
    .aiocb_size = sizeof(NvmeFormatAIOCB),
    .cancel_async = nvme_format_cancel,
    .get_aio_context = nvme_get_aio_context,
};
static void nvme_format_set(NvmeNamespace *ns, NvmeCmd *cmd)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint8_t lbaf = dw10 & 0xf;
    uint8_t pi = (dw10 >> 5) & 0x7;
    uint8_t mset = (dw10 >> 4) & 0x1;
    uint8_t pil = (dw10 >> 8) & 0x1;

    trace_pci_nvme_format_set(ns->params.nsid, lbaf, mset, pi, pil);

    ns->id_ns.dps = (pil << 3) | pi;
    ns->id_ns.flbas = lbaf | (mset << 4);

    nvme_ns_init_format(ns);
}
static void nvme_format_ns_cb(void *opaque, int ret)
{
    NvmeFormatAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeNamespace *ns = iocb->ns;
    int bytes;

    if (ret < 0) {
        iocb->ret = ret;
        goto done;
    }

    assert(ns);

    if (iocb->offset < ns->size) {
        bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset);

        iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset,
                                            bytes, BDRV_REQ_MAY_UNMAP,
                                            nvme_format_ns_cb, iocb);

        iocb->offset += bytes;
        return;
    }

    nvme_format_set(ns, &req->cmd);
    iocb->ns = NULL;
    iocb->offset = 0;

done:
    iocb->aiocb = NULL;
    qemu_bh_schedule(iocb->bh);
}
static uint16_t nvme_format_check(NvmeNamespace *ns, uint8_t lbaf, uint8_t pi)
{
    if (ns->params.zoned) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    if (lbaf > ns->id_ns.nlbaf) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    if (pi && (ns->id_ns.lbaf[lbaf].ms < sizeof(NvmeDifTuple))) {
        return NVME_INVALID_FORMAT | NVME_DNR;
    }

    if (pi && pi > NVME_ID_NS_DPS_TYPE_3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}
static void nvme_format_bh(void *opaque)
{
    NvmeFormatAIOCB *iocb = opaque;
    NvmeRequest *req = iocb->req;
    NvmeCtrl *n = nvme_ctrl(req);
    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
    uint8_t lbaf = dw10 & 0xf;
    uint8_t pi = (dw10 >> 5) & 0x7;
    uint16_t status;
    int i;

    if (iocb->ret < 0) {
        goto done;
    }

    if (iocb->broadcast) {
        for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
            iocb->ns = nvme_ns(n, i);
            if (iocb->ns) {
                iocb->nsid = i;
                break;
            }
        }
    }

    if (!iocb->ns) {
        goto done;
    }

    status = nvme_format_check(iocb->ns, lbaf, pi);
    if (status) {
        req->status = status;
        goto done;
    }

    iocb->ns->status = NVME_FORMAT_IN_PROGRESS;
    nvme_format_ns_cb(iocb, 0);
    return;

done:
    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_aio_unref(iocb);
}
static uint16_t nvme_format(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeFormatAIOCB *iocb;
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint16_t status;

    iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req);

    iocb->req = req;
    iocb->bh = qemu_bh_new(nvme_format_bh, iocb);
    iocb->ret = 0;
    iocb->ns = NULL;
    iocb->nsid = 0;
    iocb->broadcast = (nsid == NVME_NSID_BROADCAST);
    iocb->offset = 0;

    if (!iocb->broadcast) {
        if (!nvme_nsid_valid(n, nsid)) {
            status = NVME_INVALID_NSID | NVME_DNR;
            goto out;
        }

        iocb->ns = nvme_ns(n, nsid);
        if (!iocb->ns) {
            status = NVME_INVALID_FIELD | NVME_DNR;
            goto out;
        }
    }

    req->aiocb = &iocb->common;
    qemu_bh_schedule(iocb->bh);

    return NVME_NO_COMPLETE;

out:
    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);

    return status;
}
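
/*
 * Format NVM runs asynchronously: the bottom half (nvme_format_bh) picks the
 * next namespace (all of them for the broadcast NSID), validates the
 * requested LBA format, and nvme_format_ns_cb() then zeroes the backing
 * device in BDRV_REQUEST_MAX_BYTES sized chunks before the completion
 * callback fires.
 */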
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    /* SGLs shall not be used for Admin commands in NVMe over PCIe */
    if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    case NVME_ADM_CMD_NS_ATTACHMENT:
        return nvme_ns_attachment(n, req);
    case NVME_ADM_CMD_FORMAT_NVM:
        return nvme_format(n, req);
    default:
        assert(false);
    }

    return NVME_INVALID_OPCODE | NVME_DNR;
}
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
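
/*
 * Commands are fetched from guest memory one SQE at a time; queue 0 routes to
 * nvme_admin_cmd() and all other queues to nvme_io_cmd(). A command that
 * returns NVME_NO_COMPLETE (e.g. Asynchronous Event Request or Format NVM)
 * stays on out_req_list and is completed later from its callback.
 */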
static void nvme_ctrl_reset(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_drain(ns);
    }

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;
}
static void nvme_ctrl_shutdown(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    if (n->pmr.dev) {
        memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
    }

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_shutdown(ns);
    }
}
static void nvme_select_iocs(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_select_iocs_ns(n, ns);
    }
}
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
        trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    nvme_select_iocs(n);

    return 0;
}
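
/*
 * Example of the derived sizes: with CC.MPS=0 the page size is 2^(0+12) =
 * 4096 bytes and max_prp_ents is 512; CC.IOSQES=6 and CC.IOCQES=4 (the only
 * values accepted given the SQES/CQES limits advertised in Identify
 * Controller) give 64-byte submission and 16-byte completion queue entries.
 */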
static void nvme_cmb_enable_regs(NvmeCtrl *n)
{
    NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
}
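
/*
 * In nvme_cmb_enable_regs(), SZU=2 selects a 1 MiB size unit, so CMBSZ.SZ
 * carries cmb_size_mb unchanged.
 */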
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
                           unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_ctrl_reset(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }

        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_ctrl_shutdown(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1c:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4e564d65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = size == 8 ? data :
            (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff);
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = size == 8 ? data :
            (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff);
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32);
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0x50:  /* CMBMSC */
        if (!NVME_CAP_CMBS(n->bar.cap)) {
            return;
        }

        n->bar.cmbmsc = size == 8 ? data :
            (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
        n->cmb.cmse = false;

        if (NVME_CMBMSC_CRE(data)) {
            nvme_cmb_enable_regs(n);

            if (NVME_CMBMSC_CMSE(data)) {
                hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
                if (cba + int128_get64(n->cmb.mem.size) < cba) {
                    NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
                    return;
                }

                n->cmb.cba = cba;
                n->cmb.cmse = true;
            }
        } else {
            n->bar.cmbsz = 0;
            n->bar.cmbloc = 0;
        }

        return;
    case 0x54:  /* CMBMSC hi */
        n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
        return;

    case 0xe00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xe04: /* PMRCTL */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrctl = data;
        if (NVME_PMRCTL_EN(data)) {
            memory_region_set_enabled(&n->pmr.dev->mr, true);
            n->bar.pmrsts = 0;
        } else {
            memory_region_set_enabled(&n->pmr.dev->mr, false);
            NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1);
            n->pmr.cmse = false;
        }
        return;
    case 0xe08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xe0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xe10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xe14: /* PMRMSCL */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff);
        n->pmr.cmse = false;

        if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) {
            hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT;
            if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
                NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1);
                return;
            }

            n->pmr.cmse = true;
            n->pmr.cba = cba;
        }

        return;
    case 0xe18: /* PMRMSCU */
        if (!NVME_CAP_PMRS(n->bar.cap)) {
            return;
        }

        n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32);
        return;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    trace_pci_nvme_mmio_read(addr, size);

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then a read from PMRSTS should ensure
         * prior writes made it to persistent media
         */
        if (addr == 0xe08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            /*
             * NVM Express v1.3d, Section 4.1 states: "If host software writes
             * an invalid value to the Submission Queue Tail Doorbell or
             * Completion Queue Head Doorbell register and an Asynchronous
             * Event Request command is outstanding, then an asynchronous
             * event is posted to the Admin Completion Queue with a status
             * code of Invalid Doorbell Write Value."
             *
             * Also note that the spec includes the "Invalid Doorbell Register"
             * status code, but nowhere does it specify when to use it.
             * However, it seems reasonable to use it here in a similar
             * fashion.
             */
            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            if (cq->irq_enabled) {
                n->cq_pending--;
            }

            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);

            if (n->outstanding_aers) {
                nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
                                   NVME_AER_INFO_ERR_INVALID_DB_VALUE,
                                   NVME_LOG_ERROR_INFO);
            }

            return;
        }

        trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
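
/*
 * With the fixed doorbell stride of 4 bytes, the doorbell region is laid out
 * as 0x1000: admin SQ tail, 0x1004: admin CQ head, 0x1008: SQ 1 tail,
 * 0x100c: CQ 1 head, and so on; hence the qid computations above.
 */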
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
                            unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    trace_pci_nvme_mmio_write(addr, data, size);

    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}
static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
                           unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmb.buf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmb.buf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (n->namespace.blkconf.blk && n->subsys) {
        error_setg(errp, "subsystem support is unavailable with legacy "
                   "namespace ('drive' property)");
        return;
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (n->pmr.dev) {
        if (host_memory_backend_is_mapped(n->pmr.dev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmr.dev)));
            return;
        }

        if (!is_power_of_2(n->pmr.dev->size)) {
            error_setg(errp, "pmr backend size needs to be a power of 2");
            return;
        }

        host_memory_backend_set_mapped(n->pmr.dev, true);
    }

    if (n->params.zasl > n->params.mdts) {
        error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
                   "than or equal to mdts (Maximum Data Transfer Size)");
        return;
    }

    if (!n->params.vsl) {
        error_setg(errp, "vsl must be non-zero");
        return;
    }
}
static void nvme_init_state(NvmeCtrl *n)
{
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
    n->temperature = NVME_TEMPERATURE;
    n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
    n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
}
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    uint64_t cmb_size = n->params.cmb_size_mb * MiB;

    n->cmb.buf = g_malloc0(cmb_size);
    memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", cmb_size);
    pci_register_bar(pci_dev, NVME_CMB_BIR,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);

    NVME_CAP_SET_CMBS(n->bar.cap, 1);

    if (n->params.legacy_cmb) {
        nvme_cmb_enable_regs(n);
        n->cmb.cmse = true;
    }
}
static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);

    memory_region_set_enabled(&n->pmr.dev->mr, false);
}
static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;
    uint64_t bar_size, msix_table_size, msix_pba_size;
    unsigned msix_table_offset, msix_pba_offset;
    int ret;

    Error *err = NULL;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);

    if (n->params.use_intel_id) {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
        pci_config_set_device_id(pci_conf, 0x5845);
    } else {
        pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
        pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
    }

    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB);
    msix_table_offset = bar_size;
    msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize;

    bar_size += msix_table_size;
    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
    msix_pba_offset = bar_size;
    msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8;

    bar_size += msix_pba_size;
    bar_size = pow2ceil(bar_size);

    memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    memory_region_add_subregion(&n->bar0, 0, &n->iomem);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
    ret = msix_init(pci_dev, n->params.msix_qsize,
                    &n->bar0, 0, msix_table_offset,
                    &n->bar0, 0, msix_pba_offset, 0, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            warn_report_err(err);
        } else {
            error_propagate(errp, err);
            return ret;
        }
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    }

    if (n->pmr.dev) {
        nvme_init_pmr(n, pci_dev);
    }

    return 0;
}
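
/*
 * BAR0 layout: the register file and doorbells come first (n->reg_size,
 * rounded up to 4 KiB), followed by the MSI-X table and then the PBA at
 * their own 4 KiB aligned offsets; the whole BAR is rounded up to a power of
 * two as required for a PCI memory BAR.
 */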
static void nvme_init_subnqn(NvmeCtrl *n)
{
    NvmeSubsystem *subsys = n->subsys;
    NvmeIdCtrl *id = &n->id_ctrl;

    if (!subsys) {
        snprintf((char *)id->subnqn, sizeof(id->subnqn),
                 "nqn.2019-08.org.qemu:%s", n->params.serial);
    } else {
        pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
    }
}
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');

    id->cntlid = cpu_to_le16(n->cntlid);

    id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);

    id->rab = 6;

    if (n->params.use_intel_id) {
        id->ieee[0] = 0xb3;
        id->ieee[1] = 0x02;
        id->ieee[2] = 0x00;
    } else {
        id->ieee[0] = 0x00;
        id->ieee[1] = 0x54;
        id->ieee[2] = 0x52;
    }

    id->mdts = n->params.mdts;
    id->ver = cpu_to_le32(NVME_SPEC_VER);
    id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT);
    id->cntrltype = 0x1;

    /*
     * Because the controller always completes the Abort command immediately,
     * there can never be more than one concurrently executing Abort command,
     * so this value is never used for anything. Note that there can easily be
     * many Abort commands in the queues, but they are not considered
     * "executing" until processed by nvme_abort.
     *
     * The specification recommends a value of 3 for Abort Command Limit (four
     * concurrently outstanding Abort commands), so let's use that though it
     * is inconsequential.
     */
    id->acl = 3;
    id->aerl = n->params.aerl;
    id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;

    /* recommended default value (~70 C) */
    id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
    id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);

    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(NVME_MAX_NAMESPACES);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
                           NVME_ONCS_FEATURES | NVME_ONCS_DSM |
                           NVME_ONCS_COMPARE | NVME_ONCS_COPY);

    /*
     * NOTE: If this device ever supports a command set that does NOT use 0x0
     * as a Flush-equivalent operation, support for the broadcast NSID in Flush
     * should probably be removed.
     *
     * See comment in nvme_io_cmd.
     */
    id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;

    id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0);
    id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
                           NVME_CTRL_SGLS_BITBUCKET);

    nvme_init_subnqn(n);

    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);

    if (n->subsys) {
        id->cmic |= NVME_CMIC_MULTI_CTRL;
    }

    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
    NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
    NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
    NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);

    n->bar.vs = NVME_SPEC_VER;
    n->bar.intmc = n->bar.intms = 0;
}
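
/*
 * The CAP values above translate to: MQES=0x7ff, i.e. up to 2048 entries per
 * I/O queue (0's based); TO=0xf, a worst-case ready timeout of 7.5 seconds
 * (units of 500 ms); MPSMIN=0 and MPSMAX=4, allowing host page sizes from
 * 4 KiB up to 64 KiB.
 */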
static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
{
    int cntlid;

    if (!n->subsys) {
        return 0;
    }

    cntlid = nvme_subsys_register_ctrl(n, errp);
    if (cntlid < 0) {
        return -1;
    }

    n->cntlid = cntlid;

    return 0;
}
void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns)
{
    uint32_t nsid = ns->params.nsid;
    assert(nsid && nsid <= NVME_MAX_NAMESPACES);

    n->namespaces[nsid] = ns;
    ns->attached++;

    n->dmrsl = MIN_NON_ZERO(n->dmrsl,
                            BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
}
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    Error *local_err = NULL;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
                        &pci_dev->qdev, n->parent_obj.qdev.id);

    nvme_init_state(n);
    if (nvme_init_pci(n, pci_dev, errp)) {
        return;
    }

    if (nvme_init_subsys(n, errp)) {
        error_propagate(errp, local_err);
        return;
    }
    nvme_init_ctrl(n, pci_dev);

    /* setup a namespace if the controller drive property was given */
    if (n->namespace.blkconf.blk) {
        ns = &n->namespace;
        ns->params.nsid = 1;

        if (nvme_ns_setup(ns, errp)) {
            return;
        }

        nvme_attach_ns(n, ns);
    }
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeNamespace *ns;
    int i;

    nvme_ctrl_reset(n);

    for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_cleanup(ns);
    }

    if (n->subsys) {
        nvme_subsys_unregister_ctrl(n->subsys, n);
    }

    g_free(n->cq);
    g_free(n->sq);
    g_free(n->aer_reqs);

    if (n->params.cmb_size_mb) {
        g_free(n->cmb.buf);
    }

    if (n->pmr.dev) {
        host_memory_backend_set_mapped(n->pmr.dev, false);
    }
    msix_uninit(pci_dev, &n->bar0, &n->bar0);
    memory_region_del_subregion(&n->bar0, &n->iomem);
}
static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
                     NvmeSubsystem *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
    DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
    DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
    DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7),
    DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
    DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
    DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
    DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
                     params.auto_transition_zones, true),
    DEFINE_PROP_END_OF_LIST(),
};
static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
                                    void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value = n->smart_critical_warning;

    visit_type_uint8(v, name, &value, errp);
}

static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    NvmeCtrl *n = NVME(obj);
    uint8_t value, old_value, cap = 0, index, event;

    if (!visit_type_uint8(v, name, &value, errp)) {
        return;
    }

    cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
          | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
    if (NVME_CAP_PMRS(n->bar.cap)) {
        cap |= NVME_SMART_PMR_UNRELIABLE;
    }

    if ((value & cap) != value) {
        error_setg(errp, "unsupported smart critical warning bits: 0x%x",
                   value & ~cap);
        return;
    }

    old_value = n->smart_critical_warning;
    n->smart_critical_warning = value;

    /* only inject new bits of smart critical warning */
    for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
        event = 1 << index;
        if (value & ~old_value & event)
            nvme_smart_event(n, event);
    }
}
static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}
static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *n = NVME(obj);

    device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));

    object_property_add(obj, "smart_critical_warning", "uint8",
                        nvme_get_smart_warning,
                        nvme_set_smart_warning, NULL, NULL);
}
static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .instance_init = nvme_instance_init,
    .class_init = nvme_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static const TypeInfo nvme_bus_info = {
    .name = TYPE_NVME_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(NvmeBus),
};
static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
    type_register_static(&nvme_bus_info);
}

type_init(nvme_register_types)