hw/block/nvme: add non-mdts command size limit for verify
[qemu/ar7.git] / hw/block/nvme.c
blob: 64cb966ab695c8315b6ec4349d4717e8f9989095
1 /*
2 * QEMU NVM Express Controller
4 * Copyright (c) 2012, Intel Corporation
6 * Written by Keith Busch <keith.busch@intel.com>
8 * This code is licensed under the GNU GPL v2 or later.
9 */
11 /**
12 * Reference Specs: http://www.nvmexpress.org, 1.4, 1.3, 1.2, 1.1, 1.0e
14 * https://nvmexpress.org/developers/nvme-specification/
17 /**
18 * Usage: add options:
19 * -drive file=<file>,if=none,id=<drive_id>
20 * -device nvme-subsys,id=<subsys_id>,nqn=<nqn_id>
21 * -device nvme,serial=<serial>,id=<bus_name>, \
22 * cmb_size_mb=<cmb_size_mb[optional]>, \
23 * [pmrdev=<mem_backend_file_id>,] \
24 * max_ioqpairs=<N[optional]>, \
25 * aerl=<N[optional]>,aer_max_queued=<N[optional]>, \
26 * mdts=<N[optional]>,vsl=<N[optional]>, \
27 * zoned.zasl=<N[optional]>, \
28 * subsys=<subsys_id>
29 * -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
30 * zoned=<true|false[optional]>, \
31 * subsys=<subsys_id>,detached=<true|false[optional]>
33 * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed to be at
34 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now. By default, the
35 * device will use the "v1.4 CMB scheme" - use the `legacy-cmb` parameter to
36 * always enable the CMBLOC and CMBSZ registers (v1.3 behavior).
38 * PMR emulation can be enabled by pointing `pmrdev` to a memory-backend-file.
39 * For example:
40 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
41 * size=<size> .... -device nvme,...,pmrdev=<mem_id>
43 * The PMR will use BAR 4/5 exclusively.
45 * To place controller(s) and namespace(s) in a subsystem, provide the
46 * nvme-subsys device as above.
48 * nvme subsystem device parameters
49 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50 * - `nqn`
51 * This parameter provides the `<nqn_id>` part of the string
52 * `nqn.2019-08.org.qemu:<nqn_id>` which will be reported in the SUBNQN field
53 * of subsystem controllers. Note that `<nqn_id>` should be unique per
54 * subsystem, but this is not enforced by QEMU. If not specified, it will
55 * default to the value of the `id` parameter (`<subsys_id>`).
57 * nvme device parameters
58 * ~~~~~~~~~~~~~~~~~~~~~~
59 * - `subsys`
60 * Specifying this parameter attaches the controller to the subsystem and
61 * the SUBNQN field in the controller will report the NQN of the subsystem
62 * device. This also enables the multi-controller capability reported in the
63 * Identify Controller data structure in the CMIC (Controller Multi-path I/O and
64 * Namespace Sharing Capabilities) field.
66 * - `aerl`
67 * The Asynchronous Event Request Limit (AERL). Indicates the maximum number
68 * of concurrently outstanding Asynchronous Event Request commands supported
69 * by the controller. This is a 0's based value.
71 * - `aer_max_queued`
72 * This is the maximum number of events that the device will enqueue for
73 * completion when there are no outstanding AERs. When the maximum number of
74 * enqueued events are reached, subsequent events will be dropped.
76 * - `mdts`
77 * Indicates the maximum data transfer size for a command that transfers data
78 * between host-accessible memory and the controller. The value is specified
79 * as a power of two (2^n) and is in units of the minimum memory page size
80 * (CAP.MPSMIN). The default value is 7 (i.e. 512 KiB).
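*
*   For example, assuming the minimum memory page size (CAP.MPSMIN) is 4 KiB,
*   mdts=7 allows transfers of up to 2^7 * 4 KiB = 512 KiB per command.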
82 * - `vsl`
83 * Indicates the maximum data size limit for the Verify command. Like `mdts`,
84 * this value is specified as a power of two (2^n) and is in units of the
85 * minimum memory page size (CAP.MPSMIN). The default value is 7 (i.e. 512
86 * KiB).
88 * - `zoned.zasl`
89 * Indicates the maximum data transfer size for the Zone Append command. Like
90 * `mdts`, the value is specified as a power of two (2^n) and is in units of
91 * the minimum memory page size (CAP.MPSMIN). The default value is 0 (i.e.
92 * defaulting to the value of `mdts`).
94 * - `zoned.append_size_limit`
95 * The maximum I/O size in bytes that is allowed in Zone Append command.
96 * The default is 128KiB. Since internally this value is maintained as
97 * ZASL = log2(<maximum append size> / <page size>), some values assigned
98 * to this property may be rounded down and result in a lower maximum ZA
99 * data size being in effect. By setting this property to 0, users can make
100 * ZASL equal to MDTS. This property only affects zoned namespaces.
102 * nvme namespace device parameters
103 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
104 * - `subsys`
105 * If given, the namespace will be attached to all controllers in the
106 * subsystem. Otherwise, `bus` must be given to attach this namespace to a
107 * specific controller as a non-shared namespace.
109 * - `detached`
110 * This parameter is only valid together with the `subsys` parameter. If left
111 * at the default value (`false/off`), the namespace will be attached to all
112 * controllers in the NVMe subsystem at boot-up. If set to `true/on`, the
113 * namespace will be available in the subsystem but not attached to any
114 * controllers.
116 * Setting `zoned` to true selects the Zoned Command Set for the namespace.
117 * In this case, the following namespace properties are available to configure
118 * zoned operation:
119 * zoned.zone_size=<zone size in bytes, default: 128MiB>
120 * The number may be followed by K, M, G as in kilo-, mega- or giga-.
122 * zoned.zone_capacity=<zone capacity in bytes, default: zone size>
123 * The value 0 (default) forces zone capacity to be the same as zone
124 * size. The value of this property may not exceed zone size.
126 * zoned.descr_ext_size=<zone descriptor extension size, default 0>
127 * This value needs to be specified in 64B units. If it is zero,
128 * namespace(s) will not support zone descriptor extensions.
130 * zoned.max_active=<Maximum Active Resources (zones), default: 0>
131 * The default value means there is no limit to the number of
132 * concurrently active zones.
134 * zoned.max_open=<Maximum Open Resources (zones), default: 0>
135 * The default value means there is no limit to the number of
136 * concurrently open zones.
138 * zoned.cross_read=<enable RAZB, default: false>
139 * Setting this property to true enables Read Across Zone Boundaries.
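*
* An illustrative invocation assembled from the options above (all values are
* examples only):
*
*     -drive file=nvme.img,if=none,id=nvm
*     -device nvme,serial=deadbeef,id=nvme0,mdts=7,vsl=7
*     -device nvme-ns,drive=nvm,bus=nvme0,nsid=1,zoned=true
*/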
142 #include "qemu/osdep.h"
143 #include "qemu/units.h"
144 #include "qemu/error-report.h"
145 #include "hw/block/block.h"
146 #include "hw/pci/msix.h"
147 #include "hw/pci/pci.h"
148 #include "hw/qdev-properties.h"
149 #include "migration/vmstate.h"
150 #include "sysemu/sysemu.h"
151 #include "qapi/error.h"
152 #include "qapi/visitor.h"
153 #include "sysemu/hostmem.h"
154 #include "sysemu/block-backend.h"
155 #include "exec/memory.h"
156 #include "qemu/log.h"
157 #include "qemu/module.h"
158 #include "qemu/cutils.h"
159 #include "trace.h"
160 #include "nvme.h"
161 #include "nvme-ns.h"
162 #include "nvme-dif.h"
164 #define NVME_MAX_IOQPAIRS 0xffff
165 #define NVME_DB_SIZE 4
166 #define NVME_SPEC_VER 0x00010400
167 #define NVME_CMB_BIR 2
168 #define NVME_PMR_BIR 4
169 #define NVME_TEMPERATURE 0x143
170 #define NVME_TEMPERATURE_WARNING 0x157
171 #define NVME_TEMPERATURE_CRITICAL 0x175
172 #define NVME_NUM_FW_SLOTS 1
174 #define NVME_GUEST_ERR(trace, fmt, ...) \
175 do { \
176 (trace_##trace)(__VA_ARGS__); \
177 qemu_log_mask(LOG_GUEST_ERROR, #trace \
178 " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
179 } while (0)
181 static const bool nvme_feature_support[NVME_FID_MAX] = {
182 [NVME_ARBITRATION] = true,
183 [NVME_POWER_MANAGEMENT] = true,
184 [NVME_TEMPERATURE_THRESHOLD] = true,
185 [NVME_ERROR_RECOVERY] = true,
186 [NVME_VOLATILE_WRITE_CACHE] = true,
187 [NVME_NUMBER_OF_QUEUES] = true,
188 [NVME_INTERRUPT_COALESCING] = true,
189 [NVME_INTERRUPT_VECTOR_CONF] = true,
190 [NVME_WRITE_ATOMICITY] = true,
191 [NVME_ASYNCHRONOUS_EVENT_CONF] = true,
192 [NVME_TIMESTAMP] = true,
195 static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
196 [NVME_TEMPERATURE_THRESHOLD] = NVME_FEAT_CAP_CHANGE,
197 [NVME_ERROR_RECOVERY] = NVME_FEAT_CAP_CHANGE | NVME_FEAT_CAP_NS,
198 [NVME_VOLATILE_WRITE_CACHE] = NVME_FEAT_CAP_CHANGE,
199 [NVME_NUMBER_OF_QUEUES] = NVME_FEAT_CAP_CHANGE,
200 [NVME_ASYNCHRONOUS_EVENT_CONF] = NVME_FEAT_CAP_CHANGE,
201 [NVME_TIMESTAMP] = NVME_FEAT_CAP_CHANGE,
204 static const uint32_t nvme_cse_acs[256] = {
205 [NVME_ADM_CMD_DELETE_SQ] = NVME_CMD_EFF_CSUPP,
206 [NVME_ADM_CMD_CREATE_SQ] = NVME_CMD_EFF_CSUPP,
207 [NVME_ADM_CMD_GET_LOG_PAGE] = NVME_CMD_EFF_CSUPP,
208 [NVME_ADM_CMD_DELETE_CQ] = NVME_CMD_EFF_CSUPP,
209 [NVME_ADM_CMD_CREATE_CQ] = NVME_CMD_EFF_CSUPP,
210 [NVME_ADM_CMD_IDENTIFY] = NVME_CMD_EFF_CSUPP,
211 [NVME_ADM_CMD_ABORT] = NVME_CMD_EFF_CSUPP,
212 [NVME_ADM_CMD_SET_FEATURES] = NVME_CMD_EFF_CSUPP,
213 [NVME_ADM_CMD_GET_FEATURES] = NVME_CMD_EFF_CSUPP,
214 [NVME_ADM_CMD_ASYNC_EV_REQ] = NVME_CMD_EFF_CSUPP,
215 [NVME_ADM_CMD_NS_ATTACHMENT] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_NIC,
218 static const uint32_t nvme_cse_iocs_none[256];
220 static const uint32_t nvme_cse_iocs_nvm[256] = {
221 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
222 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
223 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
224 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP,
225 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
226 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP,
227 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
228 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
231 static const uint32_t nvme_cse_iocs_zoned[256] = {
232 [NVME_CMD_FLUSH] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
233 [NVME_CMD_WRITE_ZEROES] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
234 [NVME_CMD_WRITE] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
235 [NVME_CMD_READ] = NVME_CMD_EFF_CSUPP,
236 [NVME_CMD_DSM] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
237 [NVME_CMD_VERIFY] = NVME_CMD_EFF_CSUPP,
238 [NVME_CMD_COPY] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
239 [NVME_CMD_COMPARE] = NVME_CMD_EFF_CSUPP,
240 [NVME_CMD_ZONE_APPEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
241 [NVME_CMD_ZONE_MGMT_SEND] = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
242 [NVME_CMD_ZONE_MGMT_RECV] = NVME_CMD_EFF_CSUPP,
245 static void nvme_process_sq(void *opaque);
247 static uint16_t nvme_sqid(NvmeRequest *req)
249 return le16_to_cpu(req->sq->sqid);
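/*
 * Move a zone to a new state: remove it from the list tracking its current
 * state (if it is on one) and insert it on the list for the new state.
 */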
252 static void nvme_assign_zone_state(NvmeNamespace *ns, NvmeZone *zone,
253 NvmeZoneState state)
255 if (QTAILQ_IN_USE(zone, entry)) {
256 switch (nvme_get_zone_state(zone)) {
257 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
258 QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
259 break;
260 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
261 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
262 break;
263 case NVME_ZONE_STATE_CLOSED:
264 QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
265 break;
266 case NVME_ZONE_STATE_FULL:
267 QTAILQ_REMOVE(&ns->full_zones, zone, entry);
268 default:
273 nvme_set_zone_state(zone, state);
275 switch (state) {
276 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
277 QTAILQ_INSERT_TAIL(&ns->exp_open_zones, zone, entry);
278 break;
279 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
280 QTAILQ_INSERT_TAIL(&ns->imp_open_zones, zone, entry);
281 break;
282 case NVME_ZONE_STATE_CLOSED:
283 QTAILQ_INSERT_TAIL(&ns->closed_zones, zone, entry);
284 break;
285 case NVME_ZONE_STATE_FULL:
286 QTAILQ_INSERT_TAIL(&ns->full_zones, zone, entry);
287 case NVME_ZONE_STATE_READ_ONLY:
288 break;
289 default:
290 zone->d.za = 0;
295 * Check if we can open a zone without exceeding open/active limits.
296 * AOR stands for "Active and Open Resources" (see TP 4053 section 2.5).
298 static int nvme_aor_check(NvmeNamespace *ns, uint32_t act, uint32_t opn)
300 if (ns->params.max_active_zones != 0 &&
301 ns->nr_active_zones + act > ns->params.max_active_zones) {
302 trace_pci_nvme_err_insuff_active_res(ns->params.max_active_zones);
303 return NVME_ZONE_TOO_MANY_ACTIVE | NVME_DNR;
305 if (ns->params.max_open_zones != 0 &&
306 ns->nr_open_zones + opn > ns->params.max_open_zones) {
307 trace_pci_nvme_err_insuff_open_res(ns->params.max_open_zones);
308 return NVME_ZONE_TOO_MANY_OPEN | NVME_DNR;
311 return NVME_SUCCESS;
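/* Return true if the guest address lies within the Controller Memory Buffer window. */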
314 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
316 hwaddr hi, lo;
318 if (!n->cmb.cmse) {
319 return false;
322 lo = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
323 hi = lo + int128_get64(n->cmb.mem.size);
325 return addr >= lo && addr < hi;
328 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
330 hwaddr base = n->params.legacy_cmb ? n->cmb.mem.addr : n->cmb.cba;
331 return &n->cmb.buf[addr - base];
334 static bool nvme_addr_is_pmr(NvmeCtrl *n, hwaddr addr)
336 hwaddr hi;
338 if (!n->pmr.cmse) {
339 return false;
342 hi = n->pmr.cba + int128_get64(n->pmr.dev->mr.size);
344 return addr >= n->pmr.cba && addr < hi;
347 static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr)
349 return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba);
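/*
 * Read/write guest memory: if the range lies entirely within the CMB or the
 * PMR, access the backing memory directly; otherwise fall back to PCI DMA.
 */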
352 static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
354 hwaddr hi = addr + size - 1;
355 if (hi < addr) {
356 return 1;
359 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
360 memcpy(buf, nvme_addr_to_cmb(n, addr), size);
361 return 0;
364 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
365 memcpy(buf, nvme_addr_to_pmr(n, addr), size);
366 return 0;
369 return pci_dma_read(&n->parent_obj, addr, buf, size);
372 static int nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size)
374 hwaddr hi = addr + size - 1;
375 if (hi < addr) {
376 return 1;
379 if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
380 memcpy(nvme_addr_to_cmb(n, addr), buf, size);
381 return 0;
384 if (nvme_addr_is_pmr(n, addr) && nvme_addr_is_pmr(n, hi)) {
385 memcpy(nvme_addr_to_pmr(n, addr), buf, size);
386 return 0;
389 return pci_dma_write(&n->parent_obj, addr, buf, size);
392 static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
394 return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
397 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
399 return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
402 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
404 return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
407 static void nvme_inc_cq_tail(NvmeCQueue *cq)
409 cq->tail++;
410 if (cq->tail >= cq->size) {
411 cq->tail = 0;
412 cq->phase = !cq->phase;
416 static void nvme_inc_sq_head(NvmeSQueue *sq)
418 sq->head = (sq->head + 1) % sq->size;
421 static uint8_t nvme_cq_full(NvmeCQueue *cq)
423 return (cq->tail + 1) % cq->size == cq->head;
426 static uint8_t nvme_sq_empty(NvmeSQueue *sq)
428 return sq->head == sq->tail;
431 static void nvme_irq_check(NvmeCtrl *n)
433 if (msix_enabled(&(n->parent_obj))) {
434 return;
436 if (~n->bar.intms & n->irq_status) {
437 pci_irq_assert(&n->parent_obj);
438 } else {
439 pci_irq_deassert(&n->parent_obj);
443 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
445 if (cq->irq_enabled) {
446 if (msix_enabled(&(n->parent_obj))) {
447 trace_pci_nvme_irq_msix(cq->vector);
448 msix_notify(&(n->parent_obj), cq->vector);
449 } else {
450 trace_pci_nvme_irq_pin();
451 assert(cq->vector < 32);
452 n->irq_status |= 1 << cq->vector;
453 nvme_irq_check(n);
455 } else {
456 trace_pci_nvme_irq_masked();
460 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
462 if (cq->irq_enabled) {
463 if (msix_enabled(&(n->parent_obj))) {
464 return;
465 } else {
466 assert(cq->vector < 32);
467 n->irq_status &= ~(1 << cq->vector);
468 nvme_irq_check(n);
473 static void nvme_req_clear(NvmeRequest *req)
475 req->ns = NULL;
476 req->opaque = NULL;
477 memset(&req->cqe, 0x0, sizeof(req->cqe));
478 req->status = NVME_SUCCESS;
481 static inline void nvme_sg_init(NvmeCtrl *n, NvmeSg *sg, bool dma)
483 if (dma) {
484 pci_dma_sglist_init(&sg->qsg, &n->parent_obj, 0);
485 sg->flags = NVME_SG_DMA;
486 } else {
487 qemu_iovec_init(&sg->iov, 0);
490 sg->flags |= NVME_SG_ALLOC;
493 static inline void nvme_sg_unmap(NvmeSg *sg)
495 if (!(sg->flags & NVME_SG_ALLOC)) {
496 return;
499 if (sg->flags & NVME_SG_DMA) {
500 qemu_sglist_destroy(&sg->qsg);
501 } else {
502 qemu_iovec_destroy(&sg->iov);
505 memset(sg, 0x0, sizeof(*sg));
509 * When metadata is transferred as extended LBAs, the DPTR mapped into `sg`
510 * holds both data and metadata. This function splits the data and metadata
511 * into two separate QSG/IOVs.
513 static void nvme_sg_split(NvmeSg *sg, NvmeNamespace *ns, NvmeSg *data,
514 NvmeSg *mdata)
516 NvmeSg *dst = data;
517 size_t size = nvme_lsize(ns);
518 size_t msize = nvme_msize(ns);
519 uint32_t trans_len, count = size;
520 uint64_t offset = 0;
521 bool dma = sg->flags & NVME_SG_DMA;
522 size_t sge_len;
523 size_t sg_len = dma ? sg->qsg.size : sg->iov.size;
524 int sg_idx = 0;
526 assert(sg->flags & NVME_SG_ALLOC);
528 while (sg_len) {
529 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
531 trans_len = MIN(sg_len, count);
532 trans_len = MIN(trans_len, sge_len - offset);
534 if (dst) {
535 if (dma) {
536 qemu_sglist_add(&dst->qsg, sg->qsg.sg[sg_idx].base + offset,
537 trans_len);
538 } else {
539 qemu_iovec_add(&dst->iov,
540 sg->iov.iov[sg_idx].iov_base + offset,
541 trans_len);
545 sg_len -= trans_len;
546 count -= trans_len;
547 offset += trans_len;
549 if (count == 0) {
550 dst = (dst == data) ? mdata : data;
551 count = (dst == data) ? size : msize;
554 if (sge_len == offset) {
555 offset = 0;
556 sg_idx++;
561 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
562 size_t len)
564 if (!len) {
565 return NVME_SUCCESS;
568 trace_pci_nvme_map_addr_cmb(addr, len);
570 if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
571 return NVME_DATA_TRAS_ERROR;
574 qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
576 return NVME_SUCCESS;
579 static uint16_t nvme_map_addr_pmr(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
580 size_t len)
582 if (!len) {
583 return NVME_SUCCESS;
586 if (!nvme_addr_is_pmr(n, addr) || !nvme_addr_is_pmr(n, addr + len - 1)) {
587 return NVME_DATA_TRAS_ERROR;
590 qemu_iovec_add(iov, nvme_addr_to_pmr(n, addr), len);
592 return NVME_SUCCESS;
595 static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len)
597 bool cmb = false, pmr = false;
599 if (!len) {
600 return NVME_SUCCESS;
603 trace_pci_nvme_map_addr(addr, len);
605 if (nvme_addr_is_cmb(n, addr)) {
606 cmb = true;
607 } else if (nvme_addr_is_pmr(n, addr)) {
608 pmr = true;
611 if (cmb || pmr) {
612 if (sg->flags & NVME_SG_DMA) {
613 return NVME_INVALID_USE_OF_CMB | NVME_DNR;
616 if (cmb) {
617 return nvme_map_addr_cmb(n, &sg->iov, addr, len);
618 } else {
619 return nvme_map_addr_pmr(n, &sg->iov, addr, len);
623 if (!(sg->flags & NVME_SG_DMA)) {
624 return NVME_INVALID_USE_OF_CMB | NVME_DNR;
627 qemu_sglist_add(&sg->qsg, addr, len);
629 return NVME_SUCCESS;
632 static inline bool nvme_addr_is_dma(NvmeCtrl *n, hwaddr addr)
634 return !(nvme_addr_is_cmb(n, addr) || nvme_addr_is_pmr(n, addr));
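/*
 * Map a PRP1/PRP2 pair into `sg`. Depending on the remaining transfer length,
 * PRP2 is either a second data pointer or the address of a (possibly chained)
 * PRP list that is walked one page of entries at a time.
 */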
637 static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
638 uint64_t prp2, uint32_t len)
640 hwaddr trans_len = n->page_size - (prp1 % n->page_size);
641 trans_len = MIN(len, trans_len);
642 int num_prps = (len >> n->page_bits) + 1;
643 uint16_t status;
644 int ret;
646 trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
648 nvme_sg_init(n, sg, nvme_addr_is_dma(n, prp1));
650 status = nvme_map_addr(n, sg, prp1, trans_len);
651 if (status) {
652 goto unmap;
655 len -= trans_len;
656 if (len) {
657 if (len > n->page_size) {
658 uint64_t prp_list[n->max_prp_ents];
659 uint32_t nents, prp_trans;
660 int i = 0;
662 nents = (len + n->page_size - 1) >> n->page_bits;
663 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
664 ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
665 if (ret) {
666 trace_pci_nvme_err_addr_read(prp2);
667 status = NVME_DATA_TRAS_ERROR;
668 goto unmap;
670 while (len != 0) {
671 uint64_t prp_ent = le64_to_cpu(prp_list[i]);
673 if (i == n->max_prp_ents - 1 && len > n->page_size) {
674 if (unlikely(prp_ent & (n->page_size - 1))) {
675 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
676 status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
677 goto unmap;
680 i = 0;
681 nents = (len + n->page_size - 1) >> n->page_bits;
682 prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
683 ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
684 prp_trans);
685 if (ret) {
686 trace_pci_nvme_err_addr_read(prp_ent);
687 status = NVME_DATA_TRAS_ERROR;
688 goto unmap;
690 prp_ent = le64_to_cpu(prp_list[i]);
693 if (unlikely(prp_ent & (n->page_size - 1))) {
694 trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
695 status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
696 goto unmap;
699 trans_len = MIN(len, n->page_size);
700 status = nvme_map_addr(n, sg, prp_ent, trans_len);
701 if (status) {
702 goto unmap;
705 len -= trans_len;
706 i++;
708 } else {
709 if (unlikely(prp2 & (n->page_size - 1))) {
710 trace_pci_nvme_err_invalid_prp2_align(prp2);
711 status = NVME_INVALID_PRP_OFFSET | NVME_DNR;
712 goto unmap;
714 status = nvme_map_addr(n, sg, prp2, len);
715 if (status) {
716 goto unmap;
721 return NVME_SUCCESS;
723 unmap:
724 nvme_sg_unmap(sg);
725 return status;
729 * Map 'nsgld' data descriptors from 'segment'. The function will subtract the
730 * number of bytes mapped from *len.
732 static uint16_t nvme_map_sgl_data(NvmeCtrl *n, NvmeSg *sg,
733 NvmeSglDescriptor *segment, uint64_t nsgld,
734 size_t *len, NvmeCmd *cmd)
736 dma_addr_t addr, trans_len;
737 uint32_t dlen;
738 uint16_t status;
740 for (int i = 0; i < nsgld; i++) {
741 uint8_t type = NVME_SGL_TYPE(segment[i].type);
743 switch (type) {
744 case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
745 if (cmd->opcode == NVME_CMD_WRITE) {
746 continue;
748 case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
749 break;
750 case NVME_SGL_DESCR_TYPE_SEGMENT:
751 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
752 return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
753 default:
754 return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
757 dlen = le32_to_cpu(segment[i].len);
759 if (!dlen) {
760 continue;
763 if (*len == 0) {
765 * All data has been mapped, but the SGL contains additional
766 * segments and/or descriptors. The controller might accept
767 * ignoring the rest of the SGL.
769 uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
770 if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
771 break;
774 trace_pci_nvme_err_invalid_sgl_excess_length(dlen);
775 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
778 trans_len = MIN(*len, dlen);
780 if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
781 goto next;
784 addr = le64_to_cpu(segment[i].addr);
786 if (UINT64_MAX - addr < dlen) {
787 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
790 status = nvme_map_addr(n, sg, addr, trans_len);
791 if (status) {
792 return status;
795 next:
796 *len -= trans_len;
799 return NVME_SUCCESS;
802 static uint16_t nvme_map_sgl(NvmeCtrl *n, NvmeSg *sg, NvmeSglDescriptor sgl,
803 size_t len, NvmeCmd *cmd)
806 * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
807 * dynamically allocating a potentially huge SGL. The spec allows the SGL
808 * to be larger (as in number of bytes required to describe the SGL
809 * descriptors and segment chain) than the command transfer size, so it is
810 * not bounded by MDTS.
812 const int SEG_CHUNK_SIZE = 256;
814 NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
815 uint64_t nsgld;
816 uint32_t seg_len;
817 uint16_t status;
818 hwaddr addr;
819 int ret;
821 sgld = &sgl;
822 addr = le64_to_cpu(sgl.addr);
824 trace_pci_nvme_map_sgl(NVME_SGL_TYPE(sgl.type), len);
826 nvme_sg_init(n, sg, nvme_addr_is_dma(n, addr));
829 * If the entire transfer can be described with a single data block it can
830 * be mapped directly.
832 if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
833 status = nvme_map_sgl_data(n, sg, sgld, 1, &len, cmd);
834 if (status) {
835 goto unmap;
838 goto out;
841 for (;;) {
842 switch (NVME_SGL_TYPE(sgld->type)) {
843 case NVME_SGL_DESCR_TYPE_SEGMENT:
844 case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
845 break;
846 default:
847 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
850 seg_len = le32_to_cpu(sgld->len);
852 /* check the length of the (Last) Segment descriptor */
853 if ((!seg_len || seg_len & 0xf) &&
854 (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
855 return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
858 if (UINT64_MAX - addr < seg_len) {
859 return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
862 nsgld = seg_len / sizeof(NvmeSglDescriptor);
864 while (nsgld > SEG_CHUNK_SIZE) {
865 if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
866 trace_pci_nvme_err_addr_read(addr);
867 status = NVME_DATA_TRAS_ERROR;
868 goto unmap;
871 status = nvme_map_sgl_data(n, sg, segment, SEG_CHUNK_SIZE,
872 &len, cmd);
873 if (status) {
874 goto unmap;
877 nsgld -= SEG_CHUNK_SIZE;
878 addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
881 ret = nvme_addr_read(n, addr, segment, nsgld *
882 sizeof(NvmeSglDescriptor));
883 if (ret) {
884 trace_pci_nvme_err_addr_read(addr);
885 status = NVME_DATA_TRAS_ERROR;
886 goto unmap;
889 last_sgld = &segment[nsgld - 1];
892 * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
893 * then we are done.
895 switch (NVME_SGL_TYPE(last_sgld->type)) {
896 case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
897 case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
898 status = nvme_map_sgl_data(n, sg, segment, nsgld, &len, cmd);
899 if (status) {
900 goto unmap;
903 goto out;
905 default:
906 break;
910 * If the last descriptor was not a Data Block or Bit Bucket, then the
911 * current segment must not be a Last Segment.
913 if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
914 status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
915 goto unmap;
918 sgld = last_sgld;
919 addr = le64_to_cpu(sgld->addr);
922 * Do not map the last descriptor; it will be a Segment or Last Segment
923 * descriptor and is handled by the next iteration.
925 status = nvme_map_sgl_data(n, sg, segment, nsgld - 1, &len, cmd);
926 if (status) {
927 goto unmap;
931 out:
932 /* if there is any residual left in len, the SGL was too short */
933 if (len) {
934 status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
935 goto unmap;
938 return NVME_SUCCESS;
940 unmap:
941 nvme_sg_unmap(sg);
942 return status;
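/* Map the command data pointer, dispatching on PSDT to the PRP or SGL helper. */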
945 uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
946 NvmeCmd *cmd)
948 uint64_t prp1, prp2;
950 switch (NVME_CMD_FLAGS_PSDT(cmd->flags)) {
951 case NVME_PSDT_PRP:
952 prp1 = le64_to_cpu(cmd->dptr.prp1);
953 prp2 = le64_to_cpu(cmd->dptr.prp2);
955 return nvme_map_prp(n, sg, prp1, prp2, len);
956 case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
957 case NVME_PSDT_SGL_MPTR_SGL:
958 return nvme_map_sgl(n, sg, cmd->dptr.sgl, len, cmd);
959 default:
960 return NVME_INVALID_FIELD;
964 static uint16_t nvme_map_mptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
965 NvmeCmd *cmd)
967 int psdt = NVME_CMD_FLAGS_PSDT(cmd->flags);
968 hwaddr mptr = le64_to_cpu(cmd->mptr);
969 uint16_t status;
971 if (psdt == NVME_PSDT_SGL_MPTR_SGL) {
972 NvmeSglDescriptor sgl;
974 if (nvme_addr_read(n, mptr, &sgl, sizeof(sgl))) {
975 return NVME_DATA_TRAS_ERROR;
978 status = nvme_map_sgl(n, sg, sgl, len, cmd);
979 if (status && (status & 0x7ff) == NVME_DATA_SGL_LEN_INVALID) {
980 status = NVME_MD_SGL_LEN_INVALID | NVME_DNR;
983 return status;
986 nvme_sg_init(n, sg, nvme_addr_is_dma(n, mptr));
987 status = nvme_map_addr(n, sg, mptr, len);
988 if (status) {
989 nvme_sg_unmap(sg);
992 return status;
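/*
 * Map the data portion of a transfer. For extended-LBA namespaces the DPTR
 * covers interleaved data and metadata, so the whole range is mapped and the
 * data part split off; with PRACT set and 8-byte protection information, no
 * metadata is transferred at all.
 */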
995 static uint16_t nvme_map_data(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req)
997 NvmeNamespace *ns = req->ns;
998 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
999 uint16_t ctrl = le16_to_cpu(rw->control);
1000 size_t len = nvme_l2b(ns, nlb);
1001 uint16_t status;
1003 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
1004 (ctrl & NVME_RW_PRINFO_PRACT && nvme_msize(ns) == 8)) {
1005 goto out;
1008 if (nvme_ns_ext(ns)) {
1009 NvmeSg sg;
1011 len += nvme_m2b(ns, nlb);
1013 status = nvme_map_dptr(n, &sg, len, &req->cmd);
1014 if (status) {
1015 return status;
1018 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA);
1019 nvme_sg_split(&sg, ns, &req->sg, NULL);
1020 nvme_sg_unmap(&sg);
1022 return NVME_SUCCESS;
1025 out:
1026 return nvme_map_dptr(n, &req->sg, len, &req->cmd);
1029 static uint16_t nvme_map_mdata(NvmeCtrl *n, uint32_t nlb, NvmeRequest *req)
1031 NvmeNamespace *ns = req->ns;
1032 size_t len = nvme_m2b(ns, nlb);
1033 uint16_t status;
1035 if (nvme_ns_ext(ns)) {
1036 NvmeSg sg;
1038 len += nvme_l2b(ns, nlb);
1040 status = nvme_map_dptr(n, &sg, len, &req->cmd);
1041 if (status) {
1042 return status;
1045 nvme_sg_init(n, &req->sg, sg.flags & NVME_SG_DMA);
1046 nvme_sg_split(&sg, ns, NULL, &req->sg);
1047 nvme_sg_unmap(&sg);
1049 return NVME_SUCCESS;
1052 return nvme_map_mptr(n, &req->sg, len, &req->cmd);
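/*
 * Copy to/from an interleaved (extended LBA) mapping: transfer `bytes` bytes
 * at a time from the scatter/gather list, then skip `skip_bytes`, so that only
 * the data (or only the metadata) portion is moved.
 */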
1055 static uint16_t nvme_tx_interleaved(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr,
1056 uint32_t len, uint32_t bytes,
1057 int32_t skip_bytes, int64_t offset,
1058 NvmeTxDirection dir)
1060 hwaddr addr;
1061 uint32_t trans_len, count = bytes;
1062 bool dma = sg->flags & NVME_SG_DMA;
1063 int64_t sge_len;
1064 int sg_idx = 0;
1065 int ret;
1067 assert(sg->flags & NVME_SG_ALLOC);
1069 while (len) {
1070 sge_len = dma ? sg->qsg.sg[sg_idx].len : sg->iov.iov[sg_idx].iov_len;
1072 if (sge_len - offset < 0) {
1073 offset -= sge_len;
1074 sg_idx++;
1075 continue;
1078 if (sge_len == offset) {
1079 offset = 0;
1080 sg_idx++;
1081 continue;
1084 trans_len = MIN(len, count);
1085 trans_len = MIN(trans_len, sge_len - offset);
1087 if (dma) {
1088 addr = sg->qsg.sg[sg_idx].base + offset;
1089 } else {
1090 addr = (hwaddr)(uintptr_t)sg->iov.iov[sg_idx].iov_base + offset;
1093 if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1094 ret = nvme_addr_read(n, addr, ptr, trans_len);
1095 } else {
1096 ret = nvme_addr_write(n, addr, ptr, trans_len);
1099 if (ret) {
1100 return NVME_DATA_TRAS_ERROR;
1103 ptr += trans_len;
1104 len -= trans_len;
1105 count -= trans_len;
1106 offset += trans_len;
1108 if (count == 0) {
1109 count = bytes;
1110 offset += skip_bytes;
1114 return NVME_SUCCESS;
1117 static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len,
1118 NvmeTxDirection dir)
1120 assert(sg->flags & NVME_SG_ALLOC);
1122 if (sg->flags & NVME_SG_DMA) {
1123 uint64_t residual;
1125 if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1126 residual = dma_buf_write(ptr, len, &sg->qsg);
1127 } else {
1128 residual = dma_buf_read(ptr, len, &sg->qsg);
1131 if (unlikely(residual)) {
1132 trace_pci_nvme_err_invalid_dma();
1133 return NVME_INVALID_FIELD | NVME_DNR;
1135 } else {
1136 size_t bytes;
1138 if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
1139 bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len);
1140 } else {
1141 bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len);
1144 if (unlikely(bytes != len)) {
1145 trace_pci_nvme_err_invalid_dma();
1146 return NVME_INVALID_FIELD | NVME_DNR;
1150 return NVME_SUCCESS;
1153 static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
1154 NvmeRequest *req)
1156 uint16_t status;
1158 status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
1159 if (status) {
1160 return status;
1163 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE);
1166 static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
1167 NvmeRequest *req)
1169 uint16_t status;
1171 status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
1172 if (status) {
1173 return status;
1176 return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE);
1179 uint16_t nvme_bounce_data(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
1180 NvmeTxDirection dir, NvmeRequest *req)
1182 NvmeNamespace *ns = req->ns;
1183 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1184 uint16_t ctrl = le16_to_cpu(rw->control);
1186 if (nvme_ns_ext(ns) &&
1187 !(ctrl & NVME_RW_PRINFO_PRACT && nvme_msize(ns) == 8)) {
1188 size_t lsize = nvme_lsize(ns);
1189 size_t msize = nvme_msize(ns);
1191 return nvme_tx_interleaved(n, &req->sg, ptr, len, lsize, msize, 0,
1192 dir);
1195 return nvme_tx(n, &req->sg, ptr, len, dir);
1198 uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
1199 NvmeTxDirection dir, NvmeRequest *req)
1201 NvmeNamespace *ns = req->ns;
1202 uint16_t status;
1204 if (nvme_ns_ext(ns)) {
1205 size_t lsize = nvme_lsize(ns);
1206 size_t msize = nvme_msize(ns);
1208 return nvme_tx_interleaved(n, &req->sg, ptr, len, msize, lsize, lsize,
1209 dir);
1212 nvme_sg_unmap(&req->sg);
1214 status = nvme_map_mptr(n, &req->sg, len, &req->cmd);
1215 if (status) {
1216 return status;
1219 return nvme_tx(n, &req->sg, ptr, len, dir);
1222 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
1223 BlockCompletionFunc *cb, NvmeRequest *req)
1225 assert(req->sg.flags & NVME_SG_ALLOC);
1227 if (req->sg.flags & NVME_SG_DMA) {
1228 req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
1229 cb, req);
1230 } else {
1231 req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req);
1235 static inline void nvme_blk_write(BlockBackend *blk, int64_t offset,
1236 BlockCompletionFunc *cb, NvmeRequest *req)
1238 assert(req->sg.flags & NVME_SG_ALLOC);
1240 if (req->sg.flags & NVME_SG_DMA) {
1241 req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE,
1242 cb, req);
1243 } else {
1244 req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req);
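/*
 * Timer callback: write completion entries for finished requests into the
 * guest completion queue and assert the interrupt if the queue is non-empty.
 */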
1248 static void nvme_post_cqes(void *opaque)
1250 NvmeCQueue *cq = opaque;
1251 NvmeCtrl *n = cq->ctrl;
1252 NvmeRequest *req, *next;
1253 int ret;
1255 QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
1256 NvmeSQueue *sq;
1257 hwaddr addr;
1259 if (nvme_cq_full(cq)) {
1260 break;
1263 sq = req->sq;
1264 req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
1265 req->cqe.sq_id = cpu_to_le16(sq->sqid);
1266 req->cqe.sq_head = cpu_to_le16(sq->head);
1267 addr = cq->dma_addr + cq->tail * n->cqe_size;
1268 ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
1269 sizeof(req->cqe));
1270 if (ret) {
1271 trace_pci_nvme_err_addr_write(addr);
1272 trace_pci_nvme_err_cfs();
1273 n->bar.csts = NVME_CSTS_FAILED;
1274 break;
1276 QTAILQ_REMOVE(&cq->req_list, req, entry);
1277 nvme_inc_cq_tail(cq);
1278 nvme_sg_unmap(&req->sg);
1279 QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
1281 if (cq->tail != cq->head) {
1282 nvme_irq_assert(n, cq);
1286 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
1288 assert(cq->cqid == req->sq->cqid);
1289 trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
1290 req->status);
1292 if (req->status) {
1293 trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns),
1294 req->status, req->cmd.opcode);
1297 QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
1298 QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
1299 timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
1302 static void nvme_process_aers(void *opaque)
1304 NvmeCtrl *n = opaque;
1305 NvmeAsyncEvent *event, *next;
1307 trace_pci_nvme_process_aers(n->aer_queued);
1309 QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
1310 NvmeRequest *req;
1311 NvmeAerResult *result;
1313 /* can't post cqe if there is nothing to complete */
1314 if (!n->outstanding_aers) {
1315 trace_pci_nvme_no_outstanding_aers();
1316 break;
1319 /* ignore if masked (cqe posted, but event not cleared) */
1320 if (n->aer_mask & (1 << event->result.event_type)) {
1321 trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
1322 continue;
1325 QTAILQ_REMOVE(&n->aer_queue, event, entry);
1326 n->aer_queued--;
1328 n->aer_mask |= 1 << event->result.event_type;
1329 n->outstanding_aers--;
1331 req = n->aer_reqs[n->outstanding_aers];
1333 result = (NvmeAerResult *) &req->cqe.result;
1334 result->event_type = event->result.event_type;
1335 result->event_info = event->result.event_info;
1336 result->log_page = event->result.log_page;
1337 g_free(event);
1339 trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
1340 result->log_page);
1342 nvme_enqueue_req_completion(&n->admin_cq, req);
1346 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
1347 uint8_t event_info, uint8_t log_page)
1349 NvmeAsyncEvent *event;
1351 trace_pci_nvme_enqueue_event(event_type, event_info, log_page);
1353 if (n->aer_queued == n->params.aer_max_queued) {
1354 trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
1355 return;
1358 event = g_new(NvmeAsyncEvent, 1);
1359 event->result = (NvmeAerResult) {
1360 .event_type = event_type,
1361 .event_info = event_info,
1362 .log_page = log_page,
1365 QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
1366 n->aer_queued++;
1368 nvme_process_aers(n);
1371 static void nvme_smart_event(NvmeCtrl *n, uint8_t event)
1373 uint8_t aer_info;
1375 /* Ref SPEC <Asynchronous Event Information - SMART / Health Status> */
1376 if (!(NVME_AEC_SMART(n->features.async_config) & event)) {
1377 return;
1380 switch (event) {
1381 case NVME_SMART_SPARE:
1382 aer_info = NVME_AER_INFO_SMART_SPARE_THRESH;
1383 break;
1384 case NVME_SMART_TEMPERATURE:
1385 aer_info = NVME_AER_INFO_SMART_TEMP_THRESH;
1386 break;
1387 case NVME_SMART_RELIABILITY:
1388 case NVME_SMART_MEDIA_READ_ONLY:
1389 case NVME_SMART_FAILED_VOLATILE_MEDIA:
1390 case NVME_SMART_PMR_UNRELIABLE:
1391 aer_info = NVME_AER_INFO_SMART_RELIABILITY;
1392 break;
1393 default:
1394 return;
1397 nvme_enqueue_event(n, NVME_AER_TYPE_SMART, aer_info, NVME_LOG_SMART_INFO);
1400 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
1402 n->aer_mask &= ~(1 << event_type);
1403 if (!QTAILQ_EMPTY(&n->aer_queue)) {
1404 nvme_process_aers(n);
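/* Check a transfer length against MDTS; an MDTS of zero means no limit. */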
1408 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
1410 uint8_t mdts = n->params.mdts;
1412 if (mdts && len > n->page_size << mdts) {
1413 trace_pci_nvme_err_mdts(len);
1414 return NVME_INVALID_FIELD | NVME_DNR;
1417 return NVME_SUCCESS;
1420 static inline uint16_t nvme_check_bounds(NvmeNamespace *ns, uint64_t slba,
1421 uint32_t nlb)
1423 uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
1425 if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
1426 return NVME_LBA_RANGE | NVME_DNR;
1429 return NVME_SUCCESS;
1432 static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
1433 uint32_t nlb)
1435 BlockDriverState *bs = blk_bs(ns->blkconf.blk);
1437 int64_t pnum = 0, bytes = nvme_l2b(ns, nlb);
1438 int64_t offset = nvme_l2b(ns, slba);
1439 bool zeroed;
1440 int ret;
1442 Error *local_err = NULL;
1445 * `pnum` holds the number of bytes after offset that shares the same
1446 * allocation status as the byte at offset. If `pnum` is different from
1447 * `bytes`, we should check the allocation status of the next range and
1448 * continue this until all bytes have been checked.
1450 do {
1451 bytes -= pnum;
1453 ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
1454 if (ret < 0) {
1455 error_setg_errno(&local_err, -ret, "unable to get block status");
1456 error_report_err(local_err);
1458 return NVME_INTERNAL_DEV_ERROR;
1461 zeroed = !!(ret & BDRV_BLOCK_ZERO);
1463 trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed);
1465 if (zeroed) {
1466 return NVME_DULB;
1469 offset += pnum;
1470 } while (pnum != bytes);
1472 return NVME_SUCCESS;
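/*
 * Record an NVMe status code for a failed AIO, keyed on the opcode: reads
 * report Unrecovered Read Error, writes (and flushes) report Write Fault,
 * anything else an Internal Device Error.
 */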
1475 static void nvme_aio_err(NvmeRequest *req, int ret)
1477 uint16_t status = NVME_SUCCESS;
1478 Error *local_err = NULL;
1480 switch (req->cmd.opcode) {
1481 case NVME_CMD_READ:
1482 status = NVME_UNRECOVERED_READ;
1483 break;
1484 case NVME_CMD_FLUSH:
1485 case NVME_CMD_WRITE:
1486 case NVME_CMD_WRITE_ZEROES:
1487 case NVME_CMD_ZONE_APPEND:
1488 status = NVME_WRITE_FAULT;
1489 break;
1490 default:
1491 status = NVME_INTERNAL_DEV_ERROR;
1492 break;
1495 trace_pci_nvme_err_aio(nvme_cid(req), strerror(-ret), status);
1497 error_setg_errno(&local_err, -ret, "aio failed");
1498 error_report_err(local_err);
1501 * Set the command status code to the first encountered error but allow a
1502 * subsequent Internal Device Error to trump it.
1504 if (req->status && status != NVME_INTERNAL_DEV_ERROR) {
1505 return;
1508 req->status = status;
1511 static inline uint32_t nvme_zone_idx(NvmeNamespace *ns, uint64_t slba)
1513 return ns->zone_size_log2 > 0 ? slba >> ns->zone_size_log2 :
1514 slba / ns->zone_size;
1517 static inline NvmeZone *nvme_get_zone_by_slba(NvmeNamespace *ns, uint64_t slba)
1519 uint32_t zone_idx = nvme_zone_idx(ns, slba);
1521 assert(zone_idx < ns->num_zones);
1522 return &ns->zone_array[zone_idx];
1525 static uint16_t nvme_check_zone_state_for_write(NvmeZone *zone)
1527 uint64_t zslba = zone->d.zslba;
1529 switch (nvme_get_zone_state(zone)) {
1530 case NVME_ZONE_STATE_EMPTY:
1531 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1532 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1533 case NVME_ZONE_STATE_CLOSED:
1534 return NVME_SUCCESS;
1535 case NVME_ZONE_STATE_FULL:
1536 trace_pci_nvme_err_zone_is_full(zslba);
1537 return NVME_ZONE_FULL;
1538 case NVME_ZONE_STATE_OFFLINE:
1539 trace_pci_nvme_err_zone_is_offline(zslba);
1540 return NVME_ZONE_OFFLINE;
1541 case NVME_ZONE_STATE_READ_ONLY:
1542 trace_pci_nvme_err_zone_is_read_only(zslba);
1543 return NVME_ZONE_READ_ONLY;
1544 default:
1545 assert(false);
1548 return NVME_INTERNAL_DEV_ERROR;
1551 static uint16_t nvme_check_zone_write(NvmeNamespace *ns, NvmeZone *zone,
1552 uint64_t slba, uint32_t nlb)
1554 uint64_t zcap = nvme_zone_wr_boundary(zone);
1555 uint16_t status;
1557 status = nvme_check_zone_state_for_write(zone);
1558 if (status) {
1559 return status;
1562 if (unlikely(slba != zone->w_ptr)) {
1563 trace_pci_nvme_err_write_not_at_wp(slba, zone->d.zslba, zone->w_ptr);
1564 return NVME_ZONE_INVALID_WRITE;
1567 if (unlikely((slba + nlb) > zcap)) {
1568 trace_pci_nvme_err_zone_boundary(slba, nlb, zcap);
1569 return NVME_ZONE_BOUNDARY_ERROR;
1572 return NVME_SUCCESS;
1575 static uint16_t nvme_check_zone_state_for_read(NvmeZone *zone)
1577 switch (nvme_get_zone_state(zone)) {
1578 case NVME_ZONE_STATE_EMPTY:
1579 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1580 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1581 case NVME_ZONE_STATE_FULL:
1582 case NVME_ZONE_STATE_CLOSED:
1583 case NVME_ZONE_STATE_READ_ONLY:
1584 return NVME_SUCCESS;
1585 case NVME_ZONE_STATE_OFFLINE:
1586 trace_pci_nvme_err_zone_is_offline(zone->d.zslba);
1587 return NVME_ZONE_OFFLINE;
1588 default:
1589 assert(false);
1592 return NVME_INTERNAL_DEV_ERROR;
1595 static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
1596 uint32_t nlb)
1598 NvmeZone *zone = nvme_get_zone_by_slba(ns, slba);
1599 uint64_t bndry = nvme_zone_rd_boundary(ns, zone);
1600 uint64_t end = slba + nlb;
1601 uint16_t status;
1603 status = nvme_check_zone_state_for_read(zone);
1604 if (status) {
1606 } else if (unlikely(end > bndry)) {
1607 if (!ns->params.cross_zone_read) {
1608 status = NVME_ZONE_BOUNDARY_ERROR;
1609 } else {
1611 * Read across zone boundary - check that all subsequent
1612 * zones that are being read have an appropriate state.
1614 do {
1615 zone++;
1616 status = nvme_check_zone_state_for_read(zone);
1617 if (status) {
1618 break;
1620 } while (end > nvme_zone_rd_boundary(ns, zone));
1624 return status;
1627 static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
1629 switch (nvme_get_zone_state(zone)) {
1630 case NVME_ZONE_STATE_FULL:
1631 return NVME_SUCCESS;
1633 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1634 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1635 nvme_aor_dec_open(ns);
1636 /* fallthrough */
1637 case NVME_ZONE_STATE_CLOSED:
1638 nvme_aor_dec_active(ns);
1639 /* fallthrough */
1640 case NVME_ZONE_STATE_EMPTY:
1641 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
1642 return NVME_SUCCESS;
1644 default:
1645 return NVME_ZONE_INVAL_TRANSITION;
1649 static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
1651 switch (nvme_get_zone_state(zone)) {
1652 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1653 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1654 nvme_aor_dec_open(ns);
1655 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
1656 /* fall through */
1657 case NVME_ZONE_STATE_CLOSED:
1658 return NVME_SUCCESS;
1660 default:
1661 return NVME_ZONE_INVAL_TRANSITION;
1665 static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
1667 NvmeZone *zone;
1669 if (ns->params.max_open_zones &&
1670 ns->nr_open_zones == ns->params.max_open_zones) {
1671 zone = QTAILQ_FIRST(&ns->imp_open_zones);
1672 if (zone) {
1674 * Automatically close this implicitly open zone.
1676 QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
1677 nvme_zrm_close(ns, zone);
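/*
 * Zone resource management: transition a zone to the implicitly or explicitly
 * opened state, checking the active/open resource limits and auto-closing an
 * implicitly open zone first if needed.
 */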
1682 static uint16_t __nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone,
1683 bool implicit)
1685 int act = 0;
1686 uint16_t status;
1688 switch (nvme_get_zone_state(zone)) {
1689 case NVME_ZONE_STATE_EMPTY:
1690 act = 1;
1692 /* fallthrough */
1694 case NVME_ZONE_STATE_CLOSED:
1695 nvme_zrm_auto_transition_zone(ns);
1696 status = nvme_aor_check(ns, act, 1);
1697 if (status) {
1698 return status;
1701 if (act) {
1702 nvme_aor_inc_active(ns);
1705 nvme_aor_inc_open(ns);
1707 if (implicit) {
1708 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
1709 return NVME_SUCCESS;
1712 /* fallthrough */
1714 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
1715 if (implicit) {
1716 return NVME_SUCCESS;
1719 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);
1721 /* fallthrough */
1723 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
1724 return NVME_SUCCESS;
1726 default:
1727 return NVME_ZONE_INVAL_TRANSITION;
1731 static inline uint16_t nvme_zrm_auto(NvmeNamespace *ns, NvmeZone *zone)
1733 return __nvme_zrm_open(ns, zone, true);
1736 static inline uint16_t nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone)
1738 return __nvme_zrm_open(ns, zone, false);
1741 static void __nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
1742 uint32_t nlb)
1744 zone->d.wp += nlb;
1746 if (zone->d.wp == nvme_zone_wr_boundary(zone)) {
1747 nvme_zrm_finish(ns, zone);
1751 static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req)
1753 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1754 NvmeZone *zone;
1755 uint64_t slba;
1756 uint32_t nlb;
1758 slba = le64_to_cpu(rw->slba);
1759 nlb = le16_to_cpu(rw->nlb) + 1;
1760 zone = nvme_get_zone_by_slba(ns, slba);
1762 __nvme_advance_zone_wp(ns, zone, nlb);
1765 static inline bool nvme_is_write(NvmeRequest *req)
1767 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1769 return rw->opcode == NVME_CMD_WRITE ||
1770 rw->opcode == NVME_CMD_ZONE_APPEND ||
1771 rw->opcode == NVME_CMD_WRITE_ZEROES;
1774 static void nvme_misc_cb(void *opaque, int ret)
1776 NvmeRequest *req = opaque;
1777 NvmeNamespace *ns = req->ns;
1779 BlockBackend *blk = ns->blkconf.blk;
1780 BlockAcctCookie *acct = &req->acct;
1781 BlockAcctStats *stats = blk_get_stats(blk);
1783 trace_pci_nvme_misc_cb(nvme_cid(req), blk_name(blk));
1785 if (ret) {
1786 block_acct_failed(stats, acct);
1787 nvme_aio_err(req, ret);
1788 } else {
1789 block_acct_done(stats, acct);
1792 nvme_enqueue_req_completion(nvme_cq(req), req);
1795 void nvme_rw_complete_cb(void *opaque, int ret)
1797 NvmeRequest *req = opaque;
1798 NvmeNamespace *ns = req->ns;
1799 BlockBackend *blk = ns->blkconf.blk;
1800 BlockAcctCookie *acct = &req->acct;
1801 BlockAcctStats *stats = blk_get_stats(blk);
1803 trace_pci_nvme_rw_complete_cb(nvme_cid(req), blk_name(blk));
1805 if (ret) {
1806 block_acct_failed(stats, acct);
1807 nvme_aio_err(req, ret);
1808 } else {
1809 block_acct_done(stats, acct);
1812 if (ns->params.zoned && nvme_is_write(req)) {
1813 nvme_finalize_zoned_write(ns, req);
1816 nvme_enqueue_req_completion(nvme_cq(req), req);
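/*
 * Completion of the data portion of a read/write. If the namespace carries
 * metadata, continue with the metadata transfer (or zero it for Write Zeroes)
 * before completing the request.
 */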
1819 static void nvme_rw_cb(void *opaque, int ret)
1821 NvmeRequest *req = opaque;
1822 NvmeNamespace *ns = req->ns;
1824 BlockBackend *blk = ns->blkconf.blk;
1826 trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));
1828 if (ret) {
1829 goto out;
1832 if (nvme_msize(ns)) {
1833 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1834 uint64_t slba = le64_to_cpu(rw->slba);
1835 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
1836 uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);
1838 if (req->cmd.opcode == NVME_CMD_WRITE_ZEROES) {
1839 size_t mlen = nvme_m2b(ns, nlb);
1841 req->aiocb = blk_aio_pwrite_zeroes(blk, offset, mlen,
1842 BDRV_REQ_MAY_UNMAP,
1843 nvme_rw_complete_cb, req);
1844 return;
1847 if (nvme_ns_ext(ns) || req->cmd.mptr) {
1848 uint16_t status;
1850 nvme_sg_unmap(&req->sg);
1851 status = nvme_map_mdata(nvme_ctrl(req), nlb, req);
1852 if (status) {
1853 ret = -EFAULT;
1854 goto out;
1857 if (req->cmd.opcode == NVME_CMD_READ) {
1858 return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req);
1861 return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req);
1865 out:
1866 nvme_rw_complete_cb(req, ret);
1869 struct nvme_aio_flush_ctx {
1870 NvmeRequest *req;
1871 NvmeNamespace *ns;
1872 BlockAcctCookie acct;
1875 static void nvme_aio_flush_cb(void *opaque, int ret)
1877 struct nvme_aio_flush_ctx *ctx = opaque;
1878 NvmeRequest *req = ctx->req;
1879 uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
1881 BlockBackend *blk = ctx->ns->blkconf.blk;
1882 BlockAcctCookie *acct = &ctx->acct;
1883 BlockAcctStats *stats = blk_get_stats(blk);
1885 trace_pci_nvme_aio_flush_cb(nvme_cid(req), blk_name(blk));
1887 if (!ret) {
1888 block_acct_done(stats, acct);
1889 } else {
1890 block_acct_failed(stats, acct);
1891 nvme_aio_err(req, ret);
1894 (*num_flushes)--;
1895 g_free(ctx);
1897 if (*num_flushes) {
1898 return;
1901 nvme_enqueue_req_completion(nvme_cq(req), req);
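/*
 * Final step of Verify: the data and metadata have been read into bounce
 * buffers; check end-to-end protection information, if any, and complete the
 * command.
 */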
1904 static void nvme_verify_cb(void *opaque, int ret)
1906 NvmeBounceContext *ctx = opaque;
1907 NvmeRequest *req = ctx->req;
1908 NvmeNamespace *ns = req->ns;
1909 BlockBackend *blk = ns->blkconf.blk;
1910 BlockAcctCookie *acct = &req->acct;
1911 BlockAcctStats *stats = blk_get_stats(blk);
1912 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1913 uint64_t slba = le64_to_cpu(rw->slba);
1914 uint16_t ctrl = le16_to_cpu(rw->control);
1915 uint16_t apptag = le16_to_cpu(rw->apptag);
1916 uint16_t appmask = le16_to_cpu(rw->appmask);
1917 uint32_t reftag = le32_to_cpu(rw->reftag);
1918 uint16_t status;
1920 trace_pci_nvme_verify_cb(nvme_cid(req), NVME_RW_PRINFO(ctrl), apptag,
1921 appmask, reftag);
1923 if (ret) {
1924 block_acct_failed(stats, acct);
1925 nvme_aio_err(req, ret);
1926 goto out;
1929 block_acct_done(stats, acct);
1931 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
1932 status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce,
1933 ctx->mdata.iov.size, slba);
1934 if (status) {
1935 req->status = status;
1936 goto out;
1939 req->status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
1940 ctx->mdata.bounce, ctx->mdata.iov.size,
1941 ctrl, slba, apptag, appmask, reftag);
1944 out:
1945 qemu_iovec_destroy(&ctx->data.iov);
1946 g_free(ctx->data.bounce);
1948 qemu_iovec_destroy(&ctx->mdata.iov);
1949 g_free(ctx->mdata.bounce);
1951 g_free(ctx);
1953 nvme_enqueue_req_completion(nvme_cq(req), req);
1957 static void nvme_verify_mdata_in_cb(void *opaque, int ret)
1959 NvmeBounceContext *ctx = opaque;
1960 NvmeRequest *req = ctx->req;
1961 NvmeNamespace *ns = req->ns;
1962 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
1963 uint64_t slba = le64_to_cpu(rw->slba);
1964 uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
1965 size_t mlen = nvme_m2b(ns, nlb);
1966 uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);
1967 BlockBackend *blk = ns->blkconf.blk;
1969 trace_pci_nvme_verify_mdata_in_cb(nvme_cid(req), blk_name(blk));
1971 if (ret) {
1972 goto out;
1975 ctx->mdata.bounce = g_malloc(mlen);
1977 qemu_iovec_reset(&ctx->mdata.iov);
1978 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);
1980 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
1981 nvme_verify_cb, ctx);
1982 return;
1984 out:
1985 nvme_verify_cb(ctx, ret);
1988 static void nvme_aio_discard_cb(void *opaque, int ret)
1990 NvmeRequest *req = opaque;
1991 uintptr_t *discards = (uintptr_t *)&req->opaque;
1993 trace_pci_nvme_aio_discard_cb(nvme_cid(req));
1995 if (ret) {
1996 nvme_aio_err(req, ret);
1999 (*discards)--;
2001 if (*discards) {
2002 return;
2005 nvme_enqueue_req_completion(nvme_cq(req), req);
2008 struct nvme_zone_reset_ctx {
2009 NvmeRequest *req;
2010 NvmeZone *zone;
2013 static void nvme_aio_zone_reset_complete_cb(void *opaque, int ret)
2015 struct nvme_zone_reset_ctx *ctx = opaque;
2016 NvmeRequest *req = ctx->req;
2017 NvmeNamespace *ns = req->ns;
2018 NvmeZone *zone = ctx->zone;
2019 uintptr_t *resets = (uintptr_t *)&req->opaque;
2021 if (ret) {
2022 nvme_aio_err(req, ret);
2023 goto out;
2026 switch (nvme_get_zone_state(zone)) {
2027 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
2028 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
2029 nvme_aor_dec_open(ns);
2030 /* fall through */
2031 case NVME_ZONE_STATE_CLOSED:
2032 nvme_aor_dec_active(ns);
2033 /* fall through */
2034 case NVME_ZONE_STATE_FULL:
2035 zone->w_ptr = zone->d.zslba;
2036 zone->d.wp = zone->w_ptr;
2037 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY);
2038 /* fall through */
2039 default:
2040 break;
2043 out:
2044 g_free(ctx);
2046 (*resets)--;
2048 if (*resets) {
2049 return;
2052 nvme_enqueue_req_completion(nvme_cq(req), req);
2055 static void nvme_aio_zone_reset_cb(void *opaque, int ret)
2057 struct nvme_zone_reset_ctx *ctx = opaque;
2058 NvmeRequest *req = ctx->req;
2059 NvmeNamespace *ns = req->ns;
2060 NvmeZone *zone = ctx->zone;
2062 trace_pci_nvme_aio_zone_reset_cb(nvme_cid(req), zone->d.zslba);
2064 if (ret) {
2065 goto out;
2068 if (nvme_msize(ns)) {
2069 int64_t offset = ns->mdata_offset + nvme_m2b(ns, zone->d.zslba);
2071 blk_aio_pwrite_zeroes(ns->blkconf.blk, offset,
2072 nvme_m2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP,
2073 nvme_aio_zone_reset_complete_cb, ctx);
2074 return;
2077 out:
2078 nvme_aio_zone_reset_complete_cb(opaque, ret);
2081 struct nvme_copy_ctx {
2082 int copies;
2083 uint8_t *bounce;
2084 uint8_t *mbounce;
2085 uint32_t nlb;
2086 NvmeCopySourceRange *ranges;
2089 struct nvme_copy_in_ctx {
2090 NvmeRequest *req;
2091 QEMUIOVector iov;
2092 NvmeCopySourceRange *range;
2095 static void nvme_copy_complete_cb(void *opaque, int ret)
2097 NvmeRequest *req = opaque;
2098 NvmeNamespace *ns = req->ns;
2099 struct nvme_copy_ctx *ctx = req->opaque;
2101 if (ret) {
2102 block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
2103 nvme_aio_err(req, ret);
2104 goto out;
2107 block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
2109 out:
2110 if (ns->params.zoned) {
2111 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2112 uint64_t sdlba = le64_to_cpu(copy->sdlba);
2113 NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
2115 __nvme_advance_zone_wp(ns, zone, ctx->nlb);
2118 g_free(ctx->bounce);
2119 g_free(ctx->mbounce);
2120 g_free(ctx);
2122 nvme_enqueue_req_completion(nvme_cq(req), req);
2125 static void nvme_copy_cb(void *opaque, int ret)
2127 NvmeRequest *req = opaque;
2128 NvmeNamespace *ns = req->ns;
2129 struct nvme_copy_ctx *ctx = req->opaque;
2131 trace_pci_nvme_copy_cb(nvme_cid(req));
2133 if (ret) {
2134 goto out;
2137 if (nvme_msize(ns)) {
2138 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2139 uint64_t sdlba = le64_to_cpu(copy->sdlba);
2140 int64_t offset = ns->mdata_offset + nvme_m2b(ns, sdlba);
2142 qemu_iovec_reset(&req->sg.iov);
2143 qemu_iovec_add(&req->sg.iov, ctx->mbounce, nvme_m2b(ns, ctx->nlb));
2145 req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &req->sg.iov, 0,
2146 nvme_copy_complete_cb, req);
2147 return;
2150 out:
2151 nvme_copy_complete_cb(opaque, ret);
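/*
 * All source ranges of a Copy have been read into the bounce buffers; check
 * protection information, validate the destination (including zone state for
 * zoned namespaces) and issue the destination write.
 */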
2154 static void nvme_copy_in_complete(NvmeRequest *req)
2156 NvmeNamespace *ns = req->ns;
2157 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2158 struct nvme_copy_ctx *ctx = req->opaque;
2159 uint64_t sdlba = le64_to_cpu(copy->sdlba);
2160 uint16_t status;
2162 trace_pci_nvme_copy_in_complete(nvme_cid(req));
2164 block_acct_done(blk_get_stats(ns->blkconf.blk), &req->acct);
2166 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2167 uint16_t prinfor = (copy->control[0] >> 4) & 0xf;
2168 uint16_t prinfow = (copy->control[2] >> 2) & 0xf;
2169 uint16_t nr = copy->nr + 1;
2170 NvmeCopySourceRange *range;
2171 uint64_t slba;
2172 uint32_t nlb;
2173 uint16_t apptag, appmask;
2174 uint32_t reftag;
2175 uint8_t *buf = ctx->bounce, *mbuf = ctx->mbounce;
2176 size_t len, mlen;
2177 int i;
2180 * The dif helpers expect prinfo to be similar to the control field of
2181 * the NvmeRwCmd, so shift by 10 to fake it.
2183 prinfor = prinfor << 10;
2184 prinfow = prinfow << 10;
2186 for (i = 0; i < nr; i++) {
2187 range = &ctx->ranges[i];
2188 slba = le64_to_cpu(range->slba);
2189 nlb = le16_to_cpu(range->nlb) + 1;
2190 len = nvme_l2b(ns, nlb);
2191 mlen = nvme_m2b(ns, nlb);
2192 apptag = le16_to_cpu(range->apptag);
2193 appmask = le16_to_cpu(range->appmask);
2194 reftag = le32_to_cpu(range->reftag);
2196 status = nvme_dif_check(ns, buf, len, mbuf, mlen, prinfor, slba,
2197 apptag, appmask, reftag);
2198 if (status) {
2199 goto invalid;
2202 buf += len;
2203 mbuf += mlen;
2206 apptag = le16_to_cpu(copy->apptag);
2207 appmask = le16_to_cpu(copy->appmask);
2208 reftag = le32_to_cpu(copy->reftag);
2210 if (prinfow & NVME_RW_PRINFO_PRACT) {
2211 size_t len = nvme_l2b(ns, ctx->nlb);
2212 size_t mlen = nvme_m2b(ns, ctx->nlb);
2214 status = nvme_check_prinfo(ns, prinfow, sdlba, reftag);
2215 if (status) {
2216 goto invalid;
2219 nvme_dif_pract_generate_dif(ns, ctx->bounce, len, ctx->mbounce,
2220 mlen, apptag, reftag);
2221 } else {
2222 status = nvme_dif_check(ns, ctx->bounce, len, ctx->mbounce, mlen,
2223 prinfow, sdlba, apptag, appmask, reftag);
2224 if (status) {
2225 goto invalid;
2230 status = nvme_check_bounds(ns, sdlba, ctx->nlb);
2231 if (status) {
2232 trace_pci_nvme_err_invalid_lba_range(sdlba, ctx->nlb, ns->id_ns.nsze);
2233 goto invalid;
2236 if (ns->params.zoned) {
2237 NvmeZone *zone = nvme_get_zone_by_slba(ns, sdlba);
2239 status = nvme_check_zone_write(ns, zone, sdlba, ctx->nlb);
2240 if (status) {
2241 goto invalid;
2244 status = nvme_zrm_auto(ns, zone);
2245 if (status) {
2246 goto invalid;
2249 zone->w_ptr += ctx->nlb;
2252 qemu_iovec_init(&req->sg.iov, 1);
2253 qemu_iovec_add(&req->sg.iov, ctx->bounce, nvme_l2b(ns, ctx->nlb));
2255 block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
2256 BLOCK_ACCT_WRITE);
2258 req->aiocb = blk_aio_pwritev(ns->blkconf.blk, nvme_l2b(ns, sdlba),
2259 &req->sg.iov, 0, nvme_copy_cb, req);
2261 return;
2263 invalid:
2264 req->status = status;
2266 g_free(ctx->bounce);
2267 g_free(ctx);
2269 nvme_enqueue_req_completion(nvme_cq(req), req);
2272 static void nvme_aio_copy_in_cb(void *opaque, int ret)
2274 struct nvme_copy_in_ctx *in_ctx = opaque;
2275 NvmeRequest *req = in_ctx->req;
2276 NvmeNamespace *ns = req->ns;
2277 struct nvme_copy_ctx *ctx = req->opaque;
2279 qemu_iovec_destroy(&in_ctx->iov);
2280 g_free(in_ctx);
2282 trace_pci_nvme_aio_copy_in_cb(nvme_cid(req));
2284 if (ret) {
2285 nvme_aio_err(req, ret);
2288 ctx->copies--;
2290 if (ctx->copies) {
2291 return;
2294 if (req->status) {
2295 block_acct_failed(blk_get_stats(ns->blkconf.blk), &req->acct);
2297 g_free(ctx->bounce);
2298 g_free(ctx->mbounce);
2299 g_free(ctx);
2301 nvme_enqueue_req_completion(nvme_cq(req), req);
2303 return;
2306 nvme_copy_in_complete(req);
2309 struct nvme_compare_ctx {
2310 struct {
2311 QEMUIOVector iov;
2312 uint8_t *bounce;
2313 } data;
2315 struct {
2316 QEMUIOVector iov;
2317 uint8_t *bounce;
2318 } mdata;
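/*
 * Completion of the metadata read issued from nvme_compare_data_cb(). The
 * host-supplied metadata is bounced into a temporary buffer and compared
 * against the metadata read from the backing device; namespaces formatted
 * with protection information first go through nvme_dif_check(), and the
 * per-block comparison then honours the protection information location
 * (see the in-line comment below).
 */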
2321 static void nvme_compare_mdata_cb(void *opaque, int ret)
2323 NvmeRequest *req = opaque;
2324 NvmeNamespace *ns = req->ns;
2325 NvmeCtrl *n = nvme_ctrl(req);
2326 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2327 uint16_t ctrl = le16_to_cpu(rw->control);
2328 uint16_t apptag = le16_to_cpu(rw->apptag);
2329 uint16_t appmask = le16_to_cpu(rw->appmask);
2330 uint32_t reftag = le32_to_cpu(rw->reftag);
2331 struct nvme_compare_ctx *ctx = req->opaque;
2332 g_autofree uint8_t *buf = NULL;
2333 uint16_t status = NVME_SUCCESS;
2335 trace_pci_nvme_compare_mdata_cb(nvme_cid(req));
2337 buf = g_malloc(ctx->mdata.iov.size);
2339 status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size,
2340 NVME_TX_DIRECTION_TO_DEVICE, req);
2341 if (status) {
2342 req->status = status;
2343 goto out;
2346 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2347 uint64_t slba = le64_to_cpu(rw->slba);
2348 uint8_t *bufp;
2349 uint8_t *mbufp = ctx->mdata.bounce;
2350 uint8_t *end = mbufp + ctx->mdata.iov.size;
2351 size_t msize = nvme_msize(ns);
2352 int16_t pil = 0;
2354 status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
2355 ctx->mdata.bounce, ctx->mdata.iov.size, ctrl,
2356 slba, apptag, appmask, reftag);
2357 if (status) {
2358 req->status = status;
2359 goto out;
2363 * When formatted with protection information, do not compare the DIF
2364 * tuple.
2366 if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
2367 pil = nvme_msize(ns) - sizeof(NvmeDifTuple);
2370 for (bufp = buf; mbufp < end; bufp += msize, mbufp += msize) {
2371 if (memcmp(bufp + pil, mbufp + pil, msize - pil)) {
2372 req->status = NVME_CMP_FAILURE;
2373 goto out;
2377 goto out;
2380 if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
2381 req->status = NVME_CMP_FAILURE;
2382 goto out;
2385 out:
2386 qemu_iovec_destroy(&ctx->data.iov);
2387 g_free(ctx->data.bounce);
2389 qemu_iovec_destroy(&ctx->mdata.iov);
2390 g_free(ctx->mdata.bounce);
2392 g_free(ctx);
2394 nvme_enqueue_req_completion(nvme_cq(req), req);
2397 static void nvme_compare_data_cb(void *opaque, int ret)
2399 NvmeRequest *req = opaque;
2400 NvmeCtrl *n = nvme_ctrl(req);
2401 NvmeNamespace *ns = req->ns;
2402 BlockBackend *blk = ns->blkconf.blk;
2403 BlockAcctCookie *acct = &req->acct;
2404 BlockAcctStats *stats = blk_get_stats(blk);
2406 struct nvme_compare_ctx *ctx = req->opaque;
2407 g_autofree uint8_t *buf = NULL;
2408 uint16_t status;
2410 trace_pci_nvme_compare_data_cb(nvme_cid(req));
2412 if (ret) {
2413 block_acct_failed(stats, acct);
2414 nvme_aio_err(req, ret);
2415 goto out;
2418 buf = g_malloc(ctx->data.iov.size);
2420 status = nvme_bounce_data(n, buf, ctx->data.iov.size,
2421 NVME_TX_DIRECTION_TO_DEVICE, req);
2422 if (status) {
2423 req->status = status;
2424 goto out;
2427 if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) {
2428 req->status = NVME_CMP_FAILURE;
2429 goto out;
2432 if (nvme_msize(ns)) {
2433 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2434 uint64_t slba = le64_to_cpu(rw->slba);
2435 uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2436 size_t mlen = nvme_m2b(ns, nlb);
2437 uint64_t offset = ns->mdata_offset + nvme_m2b(ns, slba);
2439 ctx->mdata.bounce = g_malloc(mlen);
2441 qemu_iovec_init(&ctx->mdata.iov, 1);
2442 qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);
2444 req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
2445 nvme_compare_mdata_cb, req);
2446 return;
2449 block_acct_done(stats, acct);
2451 out:
2452 qemu_iovec_destroy(&ctx->data.iov);
2453 g_free(ctx->data.bounce);
2454 g_free(ctx);
2456 nvme_enqueue_req_completion(nvme_cq(req), req);
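/*
 * Dataset Management. Only the Deallocate (AD) attribute is acted upon:
 * each range that passes the bounds check is deallocated with one or more
 * blk_aio_pdiscard() calls, each capped at BDRV_REQUEST_MAX_BYTES. The
 * counter stashed in req->opaque is 1-initialized so that immediately
 * completing callbacks cannot finish the request before all discards have
 * been issued; ranges exceeding the advertised DMRSL are traced rather than
 * failed with an error status.
 */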
2459 static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
2461 NvmeNamespace *ns = req->ns;
2462 NvmeDsmCmd *dsm = (NvmeDsmCmd *) &req->cmd;
2464 uint32_t attr = le32_to_cpu(dsm->attributes);
2465 uint32_t nr = (le32_to_cpu(dsm->nr) & 0xff) + 1;
2467 uint16_t status = NVME_SUCCESS;
2469 trace_pci_nvme_dsm(nvme_cid(req), nvme_nsid(ns), nr, attr);
2471 if (attr & NVME_DSMGMT_AD) {
2472 int64_t offset;
2473 size_t len;
2474 NvmeDsmRange range[nr];
2475 uintptr_t *discards = (uintptr_t *)&req->opaque;
2477 status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
2478 if (status) {
2479 return status;
2483 * AIO callbacks may be called immediately, so initialize discards to 1
2484          * to make sure that the callback does not complete the request before
2485 * all discards have been issued.
2487 *discards = 1;
2489 for (int i = 0; i < nr; i++) {
2490 uint64_t slba = le64_to_cpu(range[i].slba);
2491 uint32_t nlb = le32_to_cpu(range[i].nlb);
2493 if (nvme_check_bounds(ns, slba, nlb)) {
2494 trace_pci_nvme_err_invalid_lba_range(slba, nlb,
2495 ns->id_ns.nsze);
2496 continue;
2499 trace_pci_nvme_dsm_deallocate(nvme_cid(req), nvme_nsid(ns), slba,
2500 nlb);
2502 if (nlb > n->dmrsl) {
2503 trace_pci_nvme_dsm_single_range_limit_exceeded(nlb, n->dmrsl);
2506 offset = nvme_l2b(ns, slba);
2507 len = nvme_l2b(ns, nlb);
2509 while (len) {
2510 size_t bytes = MIN(BDRV_REQUEST_MAX_BYTES, len);
2512 (*discards)++;
2514 blk_aio_pdiscard(ns->blkconf.blk, offset, bytes,
2515 nvme_aio_discard_cb, req);
2517 offset += bytes;
2518 len -= bytes;
2522 /* account for the 1-initialization */
2523 (*discards)--;
2525 if (*discards) {
2526 status = NVME_NO_COMPLETE;
2527 } else {
2528 status = req->status;
2532 return status;
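/*
 * Verify. The logical block data is read from the backing device into a
 * bounce buffer; the metadata read and any end-to-end protection checks
 * continue in nvme_verify_mdata_in_cb(). Since Verify transfers no data to
 * or from the host it is not limited by MDTS; instead the length is checked
 * against the non-MDTS limit advertised in VSL:
 *
 *     len <= n->page_size << n->params.vsl
 *
 * A request exceeding this limit fails with Invalid Field in Command.
 */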
2535 static uint16_t nvme_verify(NvmeCtrl *n, NvmeRequest *req)
2537 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2538 NvmeNamespace *ns = req->ns;
2539 BlockBackend *blk = ns->blkconf.blk;
2540 uint64_t slba = le64_to_cpu(rw->slba);
2541 uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2542 size_t len = nvme_l2b(ns, nlb);
2543 int64_t offset = nvme_l2b(ns, slba);
2544 uint16_t ctrl = le16_to_cpu(rw->control);
2545 uint32_t reftag = le32_to_cpu(rw->reftag);
2546 NvmeBounceContext *ctx = NULL;
2547 uint16_t status;
2549 trace_pci_nvme_verify(nvme_cid(req), nvme_nsid(ns), slba, nlb);
2551 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2552 status = nvme_check_prinfo(ns, ctrl, slba, reftag);
2553 if (status) {
2554 return status;
2557 if (ctrl & NVME_RW_PRINFO_PRACT) {
2558 return NVME_INVALID_PROT_INFO | NVME_DNR;
2562 if (len > n->page_size << n->params.vsl) {
2563 return NVME_INVALID_FIELD | NVME_DNR;
2566 status = nvme_check_bounds(ns, slba, nlb);
2567 if (status) {
2568 trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
2569 return status;
2572 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2573 status = nvme_check_dulbe(ns, slba, nlb);
2574 if (status) {
2575 return status;
2579 ctx = g_new0(NvmeBounceContext, 1);
2580 ctx->req = req;
2582 ctx->data.bounce = g_malloc(len);
2584 qemu_iovec_init(&ctx->data.iov, 1);
2585 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len);
2587 block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
2588 BLOCK_ACCT_READ);
2590 req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0,
2591 nvme_verify_mdata_in_cb, ctx);
2592 return NVME_NO_COMPLETE;
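/*
 * Simple Copy. The source range descriptors are transferred from the host
 * and validated (MSRC for the number of ranges, MSSRL per range, MCL for the
 * total length, plus bounds, DULBE and zone read checks), then read into a
 * single bounce buffer (and a separate metadata bounce buffer when the
 * namespace has metadata). ctx->copies is 1-initialized and incremented per
 * issued read so that nvme_aio_copy_in_cb() calls nvme_copy_in_complete()
 * only after the last read has finished; the write to SDLBA happens there.
 */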
2595 static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
2597 NvmeNamespace *ns = req->ns;
2598 NvmeCopyCmd *copy = (NvmeCopyCmd *)&req->cmd;
2600 uint16_t nr = copy->nr + 1;
2601 uint8_t format = copy->control[0] & 0xf;
2604 * Shift the PRINFOR/PRINFOW values by 10 to allow reusing the
2605 * NVME_RW_PRINFO constants.
2607 uint16_t prinfor = ((copy->control[0] >> 4) & 0xf) << 10;
2608 uint16_t prinfow = ((copy->control[2] >> 2) & 0xf) << 10;
2610 uint32_t nlb = 0;
2611 uint8_t *bounce = NULL, *bouncep = NULL;
2612 uint8_t *mbounce = NULL, *mbouncep = NULL;
2613 struct nvme_copy_ctx *ctx;
2614 uint16_t status;
2615 int i;
2617 trace_pci_nvme_copy(nvme_cid(req), nvme_nsid(ns), nr, format);
2619 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) &&
2620 ((prinfor & NVME_RW_PRINFO_PRACT) != (prinfow & NVME_RW_PRINFO_PRACT))) {
2621 return NVME_INVALID_FIELD | NVME_DNR;
2624 if (!(n->id_ctrl.ocfs & (1 << format))) {
2625 trace_pci_nvme_err_copy_invalid_format(format);
2626 return NVME_INVALID_FIELD | NVME_DNR;
2629 if (nr > ns->id_ns.msrc + 1) {
2630 return NVME_CMD_SIZE_LIMIT | NVME_DNR;
2633 ctx = g_new(struct nvme_copy_ctx, 1);
2634 ctx->ranges = g_new(NvmeCopySourceRange, nr);
2636 status = nvme_h2c(n, (uint8_t *)ctx->ranges,
2637 nr * sizeof(NvmeCopySourceRange), req);
2638 if (status) {
2639 goto out;
2642 for (i = 0; i < nr; i++) {
2643 uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
2644 uint32_t _nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;
2646 if (_nlb > le16_to_cpu(ns->id_ns.mssrl)) {
2647 status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
2648 goto out;
2651 status = nvme_check_bounds(ns, slba, _nlb);
2652 if (status) {
2653 trace_pci_nvme_err_invalid_lba_range(slba, _nlb, ns->id_ns.nsze);
2654 goto out;
2657 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2658 status = nvme_check_dulbe(ns, slba, _nlb);
2659 if (status) {
2660 goto out;
2664 if (ns->params.zoned) {
2665 status = nvme_check_zone_read(ns, slba, _nlb);
2666 if (status) {
2667 goto out;
2671 nlb += _nlb;
2674 if (nlb > le32_to_cpu(ns->id_ns.mcl)) {
2675 status = NVME_CMD_SIZE_LIMIT | NVME_DNR;
2676 goto out;
2679 bounce = bouncep = g_malloc(nvme_l2b(ns, nlb));
2680 if (nvme_msize(ns)) {
2681 mbounce = mbouncep = g_malloc(nvme_m2b(ns, nlb));
2684 block_acct_start(blk_get_stats(ns->blkconf.blk), &req->acct, 0,
2685 BLOCK_ACCT_READ);
2687 ctx->bounce = bounce;
2688 ctx->mbounce = mbounce;
2689 ctx->nlb = nlb;
2690 ctx->copies = 1;
2692 req->opaque = ctx;
2694 for (i = 0; i < nr; i++) {
2695 uint64_t slba = le64_to_cpu(ctx->ranges[i].slba);
2696 uint32_t nlb = le16_to_cpu(ctx->ranges[i].nlb) + 1;
2698 size_t len = nvme_l2b(ns, nlb);
2699 int64_t offset = nvme_l2b(ns, slba);
2701 trace_pci_nvme_copy_source_range(slba, nlb);
2703 struct nvme_copy_in_ctx *in_ctx = g_new(struct nvme_copy_in_ctx, 1);
2704 in_ctx->req = req;
2706 qemu_iovec_init(&in_ctx->iov, 1);
2707 qemu_iovec_add(&in_ctx->iov, bouncep, len);
2709 ctx->copies++;
2711 blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
2712 nvme_aio_copy_in_cb, in_ctx);
2714 bouncep += len;
2716 if (nvme_msize(ns)) {
2717 len = nvme_m2b(ns, nlb);
2718 offset = ns->mdata_offset + nvme_m2b(ns, slba);
2720 in_ctx = g_new(struct nvme_copy_in_ctx, 1);
2721 in_ctx->req = req;
2723 qemu_iovec_init(&in_ctx->iov, 1);
2724 qemu_iovec_add(&in_ctx->iov, mbouncep, len);
2726 ctx->copies++;
2728 blk_aio_preadv(ns->blkconf.blk, offset, &in_ctx->iov, 0,
2729 nvme_aio_copy_in_cb, in_ctx);
2731 mbouncep += len;
2735 /* account for the 1-initialization */
2736 ctx->copies--;
2738 if (!ctx->copies) {
2739 nvme_copy_in_complete(req);
2742 return NVME_NO_COMPLETE;
2744 out:
2745 g_free(ctx->ranges);
2746 g_free(ctx);
2748 return status;
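/*
 * Compare. The logical block data is read from the backing device into a
 * bounce buffer while the host data is mapped via DPTR; the byte-wise
 * comparison is done in nvme_compare_data_cb() and, when the namespace has
 * metadata, continued in nvme_compare_mdata_cb(). Any mismatch results in a
 * Compare Failure status.
 */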
2751 static uint16_t nvme_compare(NvmeCtrl *n, NvmeRequest *req)
2753 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2754 NvmeNamespace *ns = req->ns;
2755 BlockBackend *blk = ns->blkconf.blk;
2756 uint64_t slba = le64_to_cpu(rw->slba);
2757 uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
2758 uint16_t ctrl = le16_to_cpu(rw->control);
2759 size_t data_len = nvme_l2b(ns, nlb);
2760 size_t len = data_len;
2761 int64_t offset = nvme_l2b(ns, slba);
2762 struct nvme_compare_ctx *ctx = NULL;
2763 uint16_t status;
2765 trace_pci_nvme_compare(nvme_cid(req), nvme_nsid(ns), slba, nlb);
2767 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) && (ctrl & NVME_RW_PRINFO_PRACT)) {
2768 return NVME_INVALID_PROT_INFO | NVME_DNR;
2771 if (nvme_ns_ext(ns)) {
2772 len += nvme_m2b(ns, nlb);
2775 status = nvme_check_mdts(n, len);
2776 if (status) {
2777 return status;
2780 status = nvme_check_bounds(ns, slba, nlb);
2781 if (status) {
2782 trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
2783 return status;
2786 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2787 status = nvme_check_dulbe(ns, slba, nlb);
2788 if (status) {
2789 return status;
2793 status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
2794 if (status) {
2795 return status;
2798 ctx = g_new(struct nvme_compare_ctx, 1);
2799 ctx->data.bounce = g_malloc(data_len);
2801 req->opaque = ctx;
2803 qemu_iovec_init(&ctx->data.iov, 1);
2804 qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, data_len);
2806 block_acct_start(blk_get_stats(blk), &req->acct, data_len,
2807 BLOCK_ACCT_READ);
2808 blk_aio_preadv(blk, offset, &ctx->data.iov, 0, nvme_compare_data_cb, req);
2810 return NVME_NO_COMPLETE;
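/*
 * Flush. For a specific namespace a single blk_aio_flush() is issued. For
 * the broadcast NSID (0xffffffff) one flush is issued per attached
 * namespace; the counter in req->opaque is 1-initialized (as in nvme_dsm())
 * so the request only completes after the last flush callback has run.
 */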
2813 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
2815 uint32_t nsid = le32_to_cpu(req->cmd.nsid);
2816 uintptr_t *num_flushes = (uintptr_t *)&req->opaque;
2817 uint16_t status;
2818 struct nvme_aio_flush_ctx *ctx;
2819 NvmeNamespace *ns;
2821 trace_pci_nvme_flush(nvme_cid(req), nsid);
2823 if (nsid != NVME_NSID_BROADCAST) {
2824 req->ns = nvme_ns(n, nsid);
2825 if (unlikely(!req->ns)) {
2826 return NVME_INVALID_FIELD | NVME_DNR;
2829 block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
2830 BLOCK_ACCT_FLUSH);
2831 req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_misc_cb, req);
2832 return NVME_NO_COMPLETE;
2835 /* 1-initialize; see comment in nvme_dsm */
2836 *num_flushes = 1;
2838 for (int i = 1; i <= n->num_namespaces; i++) {
2839 ns = nvme_ns(n, i);
2840 if (!ns) {
2841 continue;
2844 ctx = g_new(struct nvme_aio_flush_ctx, 1);
2845 ctx->req = req;
2846 ctx->ns = ns;
2848 (*num_flushes)++;
2850 block_acct_start(blk_get_stats(ns->blkconf.blk), &ctx->acct, 0,
2851 BLOCK_ACCT_FLUSH);
2852 blk_aio_flush(ns->blkconf.blk, nvme_aio_flush_cb, ctx);
2855 /* account for the 1-initialization */
2856 (*num_flushes)--;
2858 if (*num_flushes) {
2859 status = NVME_NO_COMPLETE;
2860 } else {
2861 status = req->status;
2864 return status;
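/*
 * Read. The mapped size includes interleaved metadata for extended LBA
 * formats, except when PRACT is set and the metadata consists solely of the
 * eight protection information bytes, in which case the PI is handled by
 * the controller and not transferred to the host. After the MDTS, bounds,
 * zone read and DULBE checks, protected namespaces are handed off to
 * nvme_dif_rw(); everything else goes straight to the block layer.
 */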
2867 static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req)
2869 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2870 NvmeNamespace *ns = req->ns;
2871 uint64_t slba = le64_to_cpu(rw->slba);
2872 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
2873 uint16_t ctrl = le16_to_cpu(rw->control);
2874 uint64_t data_size = nvme_l2b(ns, nlb);
2875 uint64_t mapped_size = data_size;
2876 uint64_t data_offset;
2877 BlockBackend *blk = ns->blkconf.blk;
2878 uint16_t status;
2880 if (nvme_ns_ext(ns)) {
2881 mapped_size += nvme_m2b(ns, nlb);
2883 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2884 bool pract = ctrl & NVME_RW_PRINFO_PRACT;
2886 if (pract && nvme_msize(ns) == 8) {
2887 mapped_size = data_size;
2892 trace_pci_nvme_read(nvme_cid(req), nvme_nsid(ns), nlb, mapped_size, slba);
2894 status = nvme_check_mdts(n, mapped_size);
2895 if (status) {
2896 goto invalid;
2899 status = nvme_check_bounds(ns, slba, nlb);
2900 if (status) {
2901 trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
2902 goto invalid;
2905 if (ns->params.zoned) {
2906 status = nvme_check_zone_read(ns, slba, nlb);
2907 if (status) {
2908 trace_pci_nvme_err_zone_read_not_ok(slba, nlb, status);
2909 goto invalid;
2913 if (NVME_ERR_REC_DULBE(ns->features.err_rec)) {
2914 status = nvme_check_dulbe(ns, slba, nlb);
2915 if (status) {
2916 goto invalid;
2920 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2921 return nvme_dif_rw(n, req);
2924 status = nvme_map_data(n, nlb, req);
2925 if (status) {
2926 goto invalid;
2929 data_offset = nvme_l2b(ns, slba);
2931 block_acct_start(blk_get_stats(blk), &req->acct, data_size,
2932 BLOCK_ACCT_READ);
2933 nvme_blk_read(blk, data_offset, nvme_rw_cb, req);
2934 return NVME_NO_COMPLETE;
2936 invalid:
2937 block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
2938 return status | NVME_DNR;
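/*
 * Common implementation for Write, Write Zeroes and Zone Append. For zoned
 * namespaces the target zone is validated and, for appends, the effective
 * SLBA is taken from the zone write pointer, with the reference tag remapped
 * according to the protection type and the PIREMAP flag. Zone Append is
 * additionally bounded by ZASL (data_size <= page_size << zasl) when the
 * zasl parameter is configured. Write Zeroes (wrz) skips the MDTS check and
 * data mapping and is issued as blk_aio_pwrite_zeroes().
 */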
2941 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
2942 bool wrz)
2944 NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
2945 NvmeNamespace *ns = req->ns;
2946 uint64_t slba = le64_to_cpu(rw->slba);
2947 uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
2948 uint16_t ctrl = le16_to_cpu(rw->control);
2949 uint64_t data_size = nvme_l2b(ns, nlb);
2950 uint64_t mapped_size = data_size;
2951 uint64_t data_offset;
2952 NvmeZone *zone;
2953 NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
2954 BlockBackend *blk = ns->blkconf.blk;
2955 uint16_t status;
2957 if (nvme_ns_ext(ns)) {
2958 mapped_size += nvme_m2b(ns, nlb);
2960 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
2961 bool pract = ctrl & NVME_RW_PRINFO_PRACT;
2963 if (pract && nvme_msize(ns) == 8) {
2964 mapped_size -= nvme_m2b(ns, nlb);
2969 trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
2970 nvme_nsid(ns), nlb, mapped_size, slba);
2972 if (!wrz) {
2973 status = nvme_check_mdts(n, mapped_size);
2974 if (status) {
2975 goto invalid;
2979 status = nvme_check_bounds(ns, slba, nlb);
2980 if (status) {
2981 trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
2982 goto invalid;
2985 if (ns->params.zoned) {
2986 zone = nvme_get_zone_by_slba(ns, slba);
2988 if (append) {
2989 bool piremap = !!(ctrl & NVME_RW_PIREMAP);
2991 if (unlikely(slba != zone->d.zslba)) {
2992 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba);
2993 status = NVME_INVALID_FIELD;
2994 goto invalid;
2997 if (n->params.zasl &&
2998 data_size > (uint64_t)n->page_size << n->params.zasl) {
2999 trace_pci_nvme_err_zasl(data_size);
3000 return NVME_INVALID_FIELD | NVME_DNR;
3003 slba = zone->w_ptr;
3004 rw->slba = cpu_to_le64(slba);
3005 res->slba = cpu_to_le64(slba);
3007 switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3008 case NVME_ID_NS_DPS_TYPE_1:
3009 if (!piremap) {
3010 return NVME_INVALID_PROT_INFO | NVME_DNR;
3013 /* fallthrough */
3015 case NVME_ID_NS_DPS_TYPE_2:
3016 if (piremap) {
3017 uint32_t reftag = le32_to_cpu(rw->reftag);
3018 rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba));
3021 break;
3023 case NVME_ID_NS_DPS_TYPE_3:
3024 if (piremap) {
3025 return NVME_INVALID_PROT_INFO | NVME_DNR;
3028 break;
3032 status = nvme_check_zone_write(ns, zone, slba, nlb);
3033 if (status) {
3034 goto invalid;
3037 status = nvme_zrm_auto(ns, zone);
3038 if (status) {
3039 goto invalid;
3042 zone->w_ptr += nlb;
3045 data_offset = nvme_l2b(ns, slba);
3047 if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
3048 return nvme_dif_rw(n, req);
3051 if (!wrz) {
3052 status = nvme_map_data(n, nlb, req);
3053 if (status) {
3054 goto invalid;
3057 block_acct_start(blk_get_stats(blk), &req->acct, data_size,
3058 BLOCK_ACCT_WRITE);
3059 nvme_blk_write(blk, data_offset, nvme_rw_cb, req);
3060 } else {
3061 req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
3062 BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
3063 req);
3066 return NVME_NO_COMPLETE;
3068 invalid:
3069 block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
3070 return status | NVME_DNR;
3073 static inline uint16_t nvme_write(NvmeCtrl *n, NvmeRequest *req)
3075 return nvme_do_write(n, req, false, false);
3078 static inline uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
3080 return nvme_do_write(n, req, false, true);
3083 static inline uint16_t nvme_zone_append(NvmeCtrl *n, NvmeRequest *req)
3085 return nvme_do_write(n, req, true, false);
3088 static uint16_t nvme_get_mgmt_zone_slba_idx(NvmeNamespace *ns, NvmeCmd *c,
3089 uint64_t *slba, uint32_t *zone_idx)
3091 uint32_t dw10 = le32_to_cpu(c->cdw10);
3092 uint32_t dw11 = le32_to_cpu(c->cdw11);
3094 if (!ns->params.zoned) {
3095 trace_pci_nvme_err_invalid_opc(c->opcode);
3096 return NVME_INVALID_OPCODE | NVME_DNR;
3099 *slba = ((uint64_t)dw11) << 32 | dw10;
3100 if (unlikely(*slba >= ns->id_ns.nsze)) {
3101 trace_pci_nvme_err_invalid_lba_range(*slba, 0, ns->id_ns.nsze);
3102 *slba = 0;
3103 return NVME_LBA_RANGE | NVME_DNR;
3106 *zone_idx = nvme_zone_idx(ns, *slba);
3107 assert(*zone_idx < ns->num_zones);
3109 return NVME_SUCCESS;
3112 typedef uint16_t (*op_handler_t)(NvmeNamespace *, NvmeZone *, NvmeZoneState,
3113 NvmeRequest *);
3115 enum NvmeZoneProcessingMask {
3116 NVME_PROC_CURRENT_ZONE = 0,
3117 NVME_PROC_OPENED_ZONES = 1 << 0,
3118 NVME_PROC_CLOSED_ZONES = 1 << 1,
3119 NVME_PROC_READ_ONLY_ZONES = 1 << 2,
3120 NVME_PROC_FULL_ZONES = 1 << 3,
3123 static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
3124 NvmeZoneState state, NvmeRequest *req)
3126 return nvme_zrm_open(ns, zone);
3129 static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
3130 NvmeZoneState state, NvmeRequest *req)
3132 return nvme_zrm_close(ns, zone);
3135 static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
3136 NvmeZoneState state, NvmeRequest *req)
3138 return nvme_zrm_finish(ns, zone);
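/*
 * Reset a single zone as part of Zone Management Send. Resetting an empty
 * zone is a no-op; otherwise the zone contents are discarded by writing
 * zeroes (with BDRV_REQ_MAY_UNMAP) across the full zone size, and the state
 * transition is deferred to nvme_aio_zone_reset_cb() via the per-request
 * reset counter.
 */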
3141 static uint16_t nvme_reset_zone(NvmeNamespace *ns, NvmeZone *zone,
3142 NvmeZoneState state, NvmeRequest *req)
3144 uintptr_t *resets = (uintptr_t *)&req->opaque;
3145 struct nvme_zone_reset_ctx *ctx;
3147 switch (state) {
3148 case NVME_ZONE_STATE_EMPTY:
3149 return NVME_SUCCESS;
3150 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
3151 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
3152 case NVME_ZONE_STATE_CLOSED:
3153 case NVME_ZONE_STATE_FULL:
3154 break;
3155 default:
3156 return NVME_ZONE_INVAL_TRANSITION;
3160 * The zone reset aio callback needs to know the zone that is being reset
3161 * in order to transition the zone on completion.
3163 ctx = g_new(struct nvme_zone_reset_ctx, 1);
3164 ctx->req = req;
3165 ctx->zone = zone;
3167 (*resets)++;
3169 blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_l2b(ns, zone->d.zslba),
3170 nvme_l2b(ns, ns->zone_size), BDRV_REQ_MAY_UNMAP,
3171 nvme_aio_zone_reset_cb, ctx);
3173 return NVME_NO_COMPLETE;
3176 static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
3177 NvmeZoneState state, NvmeRequest *req)
3179 switch (state) {
3180 case NVME_ZONE_STATE_READ_ONLY:
3181 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
3182 /* fall through */
3183 case NVME_ZONE_STATE_OFFLINE:
3184 return NVME_SUCCESS;
3185 default:
3186 return NVME_ZONE_INVAL_TRANSITION;
3190 static uint16_t nvme_set_zd_ext(NvmeNamespace *ns, NvmeZone *zone)
3192 uint16_t status;
3193 uint8_t state = nvme_get_zone_state(zone);
3195 if (state == NVME_ZONE_STATE_EMPTY) {
3196 status = nvme_aor_check(ns, 1, 0);
3197 if (status) {
3198 return status;
3200 nvme_aor_inc_active(ns);
3201 zone->d.za |= NVME_ZA_ZD_EXT_VALID;
3202 nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
3203 return NVME_SUCCESS;
3206 return NVME_ZONE_INVAL_TRANSITION;
3209 static uint16_t nvme_bulk_proc_zone(NvmeNamespace *ns, NvmeZone *zone,
3210 enum NvmeZoneProcessingMask proc_mask,
3211 op_handler_t op_hndlr, NvmeRequest *req)
3213 uint16_t status = NVME_SUCCESS;
3214 NvmeZoneState zs = nvme_get_zone_state(zone);
3215 bool proc_zone;
3217 switch (zs) {
3218 case NVME_ZONE_STATE_IMPLICITLY_OPEN:
3219 case NVME_ZONE_STATE_EXPLICITLY_OPEN:
3220 proc_zone = proc_mask & NVME_PROC_OPENED_ZONES;
3221 break;
3222 case NVME_ZONE_STATE_CLOSED:
3223 proc_zone = proc_mask & NVME_PROC_CLOSED_ZONES;
3224 break;
3225 case NVME_ZONE_STATE_READ_ONLY:
3226 proc_zone = proc_mask & NVME_PROC_READ_ONLY_ZONES;
3227 break;
3228 case NVME_ZONE_STATE_FULL:
3229 proc_zone = proc_mask & NVME_PROC_FULL_ZONES;
3230 break;
3231 default:
3232 proc_zone = false;
3235 if (proc_zone) {
3236 status = op_hndlr(ns, zone, zs, req);
3239 return status;
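/*
 * Apply a zone operation either to the single zone selected by the command
 * (empty proc_mask) or, for Select All, to every zone whose current state is
 * included in proc_mask. Open zones cover both the implicitly and explicitly
 * open lists; read-only zones are found by walking the whole zone array.
 */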
3242 static uint16_t nvme_do_zone_op(NvmeNamespace *ns, NvmeZone *zone,
3243 enum NvmeZoneProcessingMask proc_mask,
3244 op_handler_t op_hndlr, NvmeRequest *req)
3246 NvmeZone *next;
3247 uint16_t status = NVME_SUCCESS;
3248 int i;
3250 if (!proc_mask) {
3251 status = op_hndlr(ns, zone, nvme_get_zone_state(zone), req);
3252 } else {
3253 if (proc_mask & NVME_PROC_CLOSED_ZONES) {
3254 QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
3255 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3256 req);
3257 if (status && status != NVME_NO_COMPLETE) {
3258 goto out;
3262 if (proc_mask & NVME_PROC_OPENED_ZONES) {
3263 QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
3264 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3265 req);
3266 if (status && status != NVME_NO_COMPLETE) {
3267 goto out;
3271 QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
3272 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3273 req);
3274 if (status && status != NVME_NO_COMPLETE) {
3275 goto out;
3279 if (proc_mask & NVME_PROC_FULL_ZONES) {
3280 QTAILQ_FOREACH_SAFE(zone, &ns->full_zones, entry, next) {
3281 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3282 req);
3283 if (status && status != NVME_NO_COMPLETE) {
3284 goto out;
3289 if (proc_mask & NVME_PROC_READ_ONLY_ZONES) {
3290 for (i = 0; i < ns->num_zones; i++, zone++) {
3291 status = nvme_bulk_proc_zone(ns, zone, proc_mask, op_hndlr,
3292 req);
3293 if (status && status != NVME_NO_COMPLETE) {
3294 goto out;
3300 out:
3301 return status;
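/*
 * Zone Management Send. The Zone Send Action in CDW13 selects the handler
 * and, when the Select All bit is set, the set of zone states the operation
 * applies to. Reset is the only asynchronous action: it returns
 * NVME_NO_COMPLETE while zeroing AIOs (tracked in req->opaque) are in
 * flight.
 */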
3304 static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
3306 NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
3307 NvmeNamespace *ns = req->ns;
3308 NvmeZone *zone;
3309 uintptr_t *resets;
3310 uint8_t *zd_ext;
3311 uint32_t dw13 = le32_to_cpu(cmd->cdw13);
3312 uint64_t slba = 0;
3313 uint32_t zone_idx = 0;
3314 uint16_t status;
3315 uint8_t action;
3316 bool all;
3317 enum NvmeZoneProcessingMask proc_mask = NVME_PROC_CURRENT_ZONE;
3319 action = dw13 & 0xff;
3320 all = dw13 & 0x100;
3322 req->status = NVME_SUCCESS;
3324 if (!all) {
3325 status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
3326 if (status) {
3327 return status;
3331 zone = &ns->zone_array[zone_idx];
3332 if (slba != zone->d.zslba) {
3333 trace_pci_nvme_err_unaligned_zone_cmd(action, slba, zone->d.zslba);
3334 return NVME_INVALID_FIELD | NVME_DNR;
3337 switch (action) {
3339 case NVME_ZONE_ACTION_OPEN:
3340 if (all) {
3341 proc_mask = NVME_PROC_CLOSED_ZONES;
3343 trace_pci_nvme_open_zone(slba, zone_idx, all);
3344 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_open_zone, req);
3345 break;
3347 case NVME_ZONE_ACTION_CLOSE:
3348 if (all) {
3349 proc_mask = NVME_PROC_OPENED_ZONES;
3351 trace_pci_nvme_close_zone(slba, zone_idx, all);
3352 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_close_zone, req);
3353 break;
3355 case NVME_ZONE_ACTION_FINISH:
3356 if (all) {
3357 proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES;
3359 trace_pci_nvme_finish_zone(slba, zone_idx, all);
3360 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_finish_zone, req);
3361 break;
3363 case NVME_ZONE_ACTION_RESET:
3364 resets = (uintptr_t *)&req->opaque;
3366 if (all) {
3367 proc_mask = NVME_PROC_OPENED_ZONES | NVME_PROC_CLOSED_ZONES |
3368 NVME_PROC_FULL_ZONES;
3370 trace_pci_nvme_reset_zone(slba, zone_idx, all);
3372 *resets = 1;
3374 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_reset_zone, req);
3376 (*resets)--;
3378 return *resets ? NVME_NO_COMPLETE : req->status;
3380 case NVME_ZONE_ACTION_OFFLINE:
3381 if (all) {
3382 proc_mask = NVME_PROC_READ_ONLY_ZONES;
3384 trace_pci_nvme_offline_zone(slba, zone_idx, all);
3385 status = nvme_do_zone_op(ns, zone, proc_mask, nvme_offline_zone, req);
3386 break;
3388 case NVME_ZONE_ACTION_SET_ZD_EXT:
3389 trace_pci_nvme_set_descriptor_extension(slba, zone_idx);
3390 if (all || !ns->params.zd_extension_size) {
3391 return NVME_INVALID_FIELD | NVME_DNR;
3393 zd_ext = nvme_get_zd_extension(ns, zone_idx);
3394 status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
3395 if (status) {
3396 trace_pci_nvme_err_zd_extension_map_error(zone_idx);
3397 return status;
3400 status = nvme_set_zd_ext(ns, zone);
3401 if (status == NVME_SUCCESS) {
3402 trace_pci_nvme_zd_extension_set(zone_idx);
3403 return status;
3405 break;
3407 default:
3408 trace_pci_nvme_err_invalid_mgmt_action(action);
3409 status = NVME_INVALID_FIELD;
3412 if (status == NVME_ZONE_INVAL_TRANSITION) {
3413 trace_pci_nvme_err_invalid_zone_state_transition(action, slba,
3414 zone->d.za);
3416 if (status) {
3417 status |= NVME_DNR;
3420 return status;
3423 static bool nvme_zone_matches_filter(uint32_t zafs, NvmeZone *zl)
3425 NvmeZoneState zs = nvme_get_zone_state(zl);
3427 switch (zafs) {
3428 case NVME_ZONE_REPORT_ALL:
3429 return true;
3430 case NVME_ZONE_REPORT_EMPTY:
3431 return zs == NVME_ZONE_STATE_EMPTY;
3432 case NVME_ZONE_REPORT_IMPLICITLY_OPEN:
3433 return zs == NVME_ZONE_STATE_IMPLICITLY_OPEN;
3434 case NVME_ZONE_REPORT_EXPLICITLY_OPEN:
3435 return zs == NVME_ZONE_STATE_EXPLICITLY_OPEN;
3436 case NVME_ZONE_REPORT_CLOSED:
3437 return zs == NVME_ZONE_STATE_CLOSED;
3438 case NVME_ZONE_REPORT_FULL:
3439 return zs == NVME_ZONE_STATE_FULL;
3440 case NVME_ZONE_REPORT_READ_ONLY:
3441 return zs == NVME_ZONE_STATE_READ_ONLY;
3442 case NVME_ZONE_REPORT_OFFLINE:
3443 return zs == NVME_ZONE_STATE_OFFLINE;
3444 default:
3445 return false;
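/*
 * Zone Management Receive (Report Zones). The report is assembled in a
 * scratch buffer sized from CDW12 (a zero-based number of dwords): a header
 * carrying the zone count, followed by one descriptor (plus the zone
 * descriptor extension for extended reports) per reported zone, and then
 * copied to the host.
 */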
3449 static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
3451 NvmeCmd *cmd = (NvmeCmd *)&req->cmd;
3452 NvmeNamespace *ns = req->ns;
3453     /* cdw12 is the zero-based number of dwords to return; convert to bytes */
3454 uint32_t data_size = (le32_to_cpu(cmd->cdw12) + 1) << 2;
3455 uint32_t dw13 = le32_to_cpu(cmd->cdw13);
3456 uint32_t zone_idx, zra, zrasf, partial;
3457 uint64_t max_zones, nr_zones = 0;
3458 uint16_t status;
3459 uint64_t slba;
3460 NvmeZoneDescr *z;
3461 NvmeZone *zone;
3462 NvmeZoneReportHeader *header;
3463 void *buf, *buf_p;
3464 size_t zone_entry_sz;
3465 int i;
3467 req->status = NVME_SUCCESS;
3469 status = nvme_get_mgmt_zone_slba_idx(ns, cmd, &slba, &zone_idx);
3470 if (status) {
3471 return status;
3474 zra = dw13 & 0xff;
3475 if (zra != NVME_ZONE_REPORT && zra != NVME_ZONE_REPORT_EXTENDED) {
3476 return NVME_INVALID_FIELD | NVME_DNR;
3478 if (zra == NVME_ZONE_REPORT_EXTENDED && !ns->params.zd_extension_size) {
3479 return NVME_INVALID_FIELD | NVME_DNR;
3482 zrasf = (dw13 >> 8) & 0xff;
3483 if (zrasf > NVME_ZONE_REPORT_OFFLINE) {
3484 return NVME_INVALID_FIELD | NVME_DNR;
3487 if (data_size < sizeof(NvmeZoneReportHeader)) {
3488 return NVME_INVALID_FIELD | NVME_DNR;
3491 status = nvme_check_mdts(n, data_size);
3492 if (status) {
3493 return status;
3496 partial = (dw13 >> 16) & 0x01;
3498 zone_entry_sz = sizeof(NvmeZoneDescr);
3499 if (zra == NVME_ZONE_REPORT_EXTENDED) {
3500 zone_entry_sz += ns->params.zd_extension_size;
3503 max_zones = (data_size - sizeof(NvmeZoneReportHeader)) / zone_entry_sz;
3504 buf = g_malloc0(data_size);
3506 zone = &ns->zone_array[zone_idx];
3507 for (i = zone_idx; i < ns->num_zones; i++) {
3508 if (partial && nr_zones >= max_zones) {
3509 break;
3511 if (nvme_zone_matches_filter(zrasf, zone++)) {
3512 nr_zones++;
3515 header = (NvmeZoneReportHeader *)buf;
3516 header->nr_zones = cpu_to_le64(nr_zones);
3518 buf_p = buf + sizeof(NvmeZoneReportHeader);
3519 for (; zone_idx < ns->num_zones && max_zones > 0; zone_idx++) {
3520 zone = &ns->zone_array[zone_idx];
3521 if (nvme_zone_matches_filter(zrasf, zone)) {
3522 z = (NvmeZoneDescr *)buf_p;
3523 buf_p += sizeof(NvmeZoneDescr);
3525 z->zt = zone->d.zt;
3526 z->zs = zone->d.zs;
3527 z->zcap = cpu_to_le64(zone->d.zcap);
3528 z->zslba = cpu_to_le64(zone->d.zslba);
3529 z->za = zone->d.za;
3531 if (nvme_wp_is_valid(zone)) {
3532 z->wp = cpu_to_le64(zone->d.wp);
3533 } else {
3534 z->wp = cpu_to_le64(~0ULL);
3537 if (zra == NVME_ZONE_REPORT_EXTENDED) {
3538 if (zone->d.za & NVME_ZA_ZD_EXT_VALID) {
3539 memcpy(buf_p, nvme_get_zd_extension(ns, zone_idx),
3540 ns->params.zd_extension_size);
3542 buf_p += ns->params.zd_extension_size;
3545 max_zones--;
3549 status = nvme_c2h(n, (uint8_t *)buf, data_size, req);
3551 g_free(buf);
3553 return status;
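/*
 * I/O command dispatch. Flush is special-cased before namespace resolution
 * because it may target the broadcast NSID; every other opcode requires a
 * valid, attached namespace whose I/O command set support list (iocs) marks
 * the opcode as supported.
 */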
3556 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
3558 uint32_t nsid = le32_to_cpu(req->cmd.nsid);
3560 trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
3561 req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));
3563 if (!nvme_nsid_valid(n, nsid)) {
3564 return NVME_INVALID_NSID | NVME_DNR;
3568 * In the base NVM command set, Flush may apply to all namespaces
3569 * (indicated by NSID being set to 0xFFFFFFFF). But if that feature is used
3570  * along with TP 4056 (Namespace Types), its semantics become ambiguous.
3572 * If NSID is indeed set to 0xFFFFFFFF, we simply cannot associate the
3573 * opcode with a specific command since we cannot determine a unique I/O
3574  * command set. Opcode 0x0 need not mean anything like flushing in another
3575  * command set, and suppose it DOES have completely different semantics
3576  * there - does an NSID of 0xFFFFFFFF then
3577 * mean "for all namespaces, apply whatever command set specific command
3578 * that uses the 0x0 opcode?" Or does it mean "for all namespaces, apply
3579 * whatever command that uses the 0x0 opcode if, and only if, it allows
3580 * NSID to be 0xFFFFFFFF"?
3582 * Anyway (and luckily), for now, we do not care about this since the
3583 * device only supports namespace types that includes the NVM Flush command
3584 * (NVM and Zoned), so always do an NVM Flush.
3586 if (req->cmd.opcode == NVME_CMD_FLUSH) {
3587 return nvme_flush(n, req);
3590 req->ns = nvme_ns(n, nsid);
3591 if (unlikely(!req->ns)) {
3592 return NVME_INVALID_FIELD | NVME_DNR;
3595 if (!(req->ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
3596 trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
3597 return NVME_INVALID_OPCODE | NVME_DNR;
3600 switch (req->cmd.opcode) {
3601 case NVME_CMD_WRITE_ZEROES:
3602 return nvme_write_zeroes(n, req);
3603 case NVME_CMD_ZONE_APPEND:
3604 return nvme_zone_append(n, req);
3605 case NVME_CMD_WRITE:
3606 return nvme_write(n, req);
3607 case NVME_CMD_READ:
3608 return nvme_read(n, req);
3609 case NVME_CMD_COMPARE:
3610 return nvme_compare(n, req);
3611 case NVME_CMD_DSM:
3612 return nvme_dsm(n, req);
3613 case NVME_CMD_VERIFY:
3614 return nvme_verify(n, req);
3615 case NVME_CMD_COPY:
3616 return nvme_copy(n, req);
3617 case NVME_CMD_ZONE_MGMT_SEND:
3618 return nvme_zone_mgmt_send(n, req);
3619 case NVME_CMD_ZONE_MGMT_RECV:
3620 return nvme_zone_mgmt_recv(n, req);
3621 default:
3622 assert(false);
3625 return NVME_INVALID_OPCODE | NVME_DNR;
3628 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
3630 n->sq[sq->sqid] = NULL;
3631 timer_free(sq->timer);
3632 g_free(sq->io_req);
3633 if (sq->sqid) {
3634 g_free(sq);
3638 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
3640 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
3641 NvmeRequest *r, *next;
3642 NvmeSQueue *sq;
3643 NvmeCQueue *cq;
3644 uint16_t qid = le16_to_cpu(c->qid);
3646 if (unlikely(!qid || nvme_check_sqid(n, qid))) {
3647 trace_pci_nvme_err_invalid_del_sq(qid);
3648 return NVME_INVALID_QID | NVME_DNR;
3651 trace_pci_nvme_del_sq(qid);
3653 sq = n->sq[qid];
3654 while (!QTAILQ_EMPTY(&sq->out_req_list)) {
3655 r = QTAILQ_FIRST(&sq->out_req_list);
3656 assert(r->aiocb);
3657 blk_aio_cancel(r->aiocb);
3659 if (!nvme_check_cqid(n, sq->cqid)) {
3660 cq = n->cq[sq->cqid];
3661 QTAILQ_REMOVE(&cq->sq_list, sq, entry);
3663 nvme_post_cqes(cq);
3664 QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
3665 if (r->sq == sq) {
3666 QTAILQ_REMOVE(&cq->req_list, r, entry);
3667 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
3672 nvme_free_sq(sq, n);
3673 return NVME_SUCCESS;
3676 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
3677 uint16_t sqid, uint16_t cqid, uint16_t size)
3679 int i;
3680 NvmeCQueue *cq;
3682 sq->ctrl = n;
3683 sq->dma_addr = dma_addr;
3684 sq->sqid = sqid;
3685 sq->size = size;
3686 sq->cqid = cqid;
3687 sq->head = sq->tail = 0;
3688 sq->io_req = g_new0(NvmeRequest, sq->size);
3690 QTAILQ_INIT(&sq->req_list);
3691 QTAILQ_INIT(&sq->out_req_list);
3692 for (i = 0; i < sq->size; i++) {
3693 sq->io_req[i].sq = sq;
3694 QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
3696 sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
3698 assert(n->cq[cqid]);
3699 cq = n->cq[cqid];
3700 QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
3701 n->sq[sqid] = sq;
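/*
 * Create I/O Submission Queue. The queue must reference an existing
 * completion queue, use a free queue identifier within max_ioqpairs, fit
 * within CAP.MQES, be physically contiguous (PC set) and have a page-aligned
 * base address.
 */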
3704 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
3706 NvmeSQueue *sq;
3707 NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
3709 uint16_t cqid = le16_to_cpu(c->cqid);
3710 uint16_t sqid = le16_to_cpu(c->sqid);
3711 uint16_t qsize = le16_to_cpu(c->qsize);
3712 uint16_t qflags = le16_to_cpu(c->sq_flags);
3713 uint64_t prp1 = le64_to_cpu(c->prp1);
3715 trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
3717 if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
3718 trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
3719 return NVME_INVALID_CQID | NVME_DNR;
3721 if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
3722 n->sq[sqid] != NULL)) {
3723 trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
3724 return NVME_INVALID_QID | NVME_DNR;
3726 if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
3727 trace_pci_nvme_err_invalid_create_sq_size(qsize);
3728 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
3730 if (unlikely(prp1 & (n->page_size - 1))) {
3731 trace_pci_nvme_err_invalid_create_sq_addr(prp1);
3732 return NVME_INVALID_PRP_OFFSET | NVME_DNR;
3734 if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
3735 trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
3736 return NVME_INVALID_FIELD | NVME_DNR;
3738 sq = g_malloc0(sizeof(*sq));
3739 nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
3740 return NVME_SUCCESS;
3743 struct nvme_stats {
3744 uint64_t units_read;
3745 uint64_t units_written;
3746 uint64_t read_commands;
3747 uint64_t write_commands;
3750 static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
3752 BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);
3754 stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
3755 stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
3756 stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
3757 stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
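/*
 * SMART / Health Information log page. Read/write statistics come from the
 * block accounting of the selected namespace (or are summed over all
 * namespaces for the broadcast NSID) and are reported as data units of 1000
 * 512-byte sectors, rounded up. Unless Retain Asynchronous Event is set,
 * pending SMART events are cleared.
 */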
3760 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
3761 uint64_t off, NvmeRequest *req)
3763 uint32_t nsid = le32_to_cpu(req->cmd.nsid);
3764 struct nvme_stats stats = { 0 };
3765 NvmeSmartLog smart = { 0 };
3766 uint32_t trans_len;
3767 NvmeNamespace *ns;
3768 time_t current_ms;
3770 if (off >= sizeof(smart)) {
3771 return NVME_INVALID_FIELD | NVME_DNR;
3774 if (nsid != 0xffffffff) {
3775 ns = nvme_ns(n, nsid);
3776 if (!ns) {
3777 return NVME_INVALID_NSID | NVME_DNR;
3779 nvme_set_blk_stats(ns, &stats);
3780 } else {
3781 int i;
3783 for (i = 1; i <= n->num_namespaces; i++) {
3784 ns = nvme_ns(n, i);
3785 if (!ns) {
3786 continue;
3788 nvme_set_blk_stats(ns, &stats);
3792 trans_len = MIN(sizeof(smart) - off, buf_len);
3793 smart.critical_warning = n->smart_critical_warning;
3795 smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
3796 1000));
3797 smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
3798 1000));
3799 smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
3800 smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);
3802 smart.temperature = cpu_to_le16(n->temperature);
3804 if ((n->temperature >= n->features.temp_thresh_hi) ||
3805 (n->temperature <= n->features.temp_thresh_low)) {
3806 smart.critical_warning |= NVME_SMART_TEMPERATURE;
3809 current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
3810 smart.power_on_hours[0] =
3811 cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);
3813 if (!rae) {
3814 nvme_clear_events(n, NVME_AER_TYPE_SMART);
3817 return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
3820 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
3821 NvmeRequest *req)
3823 uint32_t trans_len;
3824 NvmeFwSlotInfoLog fw_log = {
3825 .afi = 0x1,
3828 if (off >= sizeof(fw_log)) {
3829 return NVME_INVALID_FIELD | NVME_DNR;
3832 strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
3833 trans_len = MIN(sizeof(fw_log) - off, buf_len);
3835 return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
3838 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
3839 uint64_t off, NvmeRequest *req)
3841 uint32_t trans_len;
3842 NvmeErrorLog errlog;
3844 if (off >= sizeof(errlog)) {
3845 return NVME_INVALID_FIELD | NVME_DNR;
3848 if (!rae) {
3849 nvme_clear_events(n, NVME_AER_TYPE_ERROR);
3852 memset(&errlog, 0x0, sizeof(errlog));
3853 trans_len = MIN(sizeof(errlog) - off, buf_len);
3855 return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
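/*
 * Changed Namespace List log page. Changed NSIDs are collected from the
 * controller's changed_nsids bitmap, clearing each reported bit; if more
 * than 1024 namespaces have changed, the list degenerates to a single
 * 0xffffffff entry as required by the spec.
 */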
3858 static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
3859 uint64_t off, NvmeRequest *req)
3861 uint32_t nslist[1024];
3862 uint32_t trans_len;
3863 int i = 0;
3864 uint32_t nsid;
3866 memset(nslist, 0x0, sizeof(nslist));
3867 trans_len = MIN(sizeof(nslist) - off, buf_len);
3869 while ((nsid = find_first_bit(n->changed_nsids, NVME_CHANGED_NSID_SIZE)) !=
3870 NVME_CHANGED_NSID_SIZE) {
3872          * If more than 1024 namespaces have changed, the first entry in the
3873          * log page should be set to 0xffffffff and the others to 0, as per the spec.
3875 if (i == ARRAY_SIZE(nslist)) {
3876 memset(nslist, 0x0, sizeof(nslist));
3877 nslist[0] = 0xffffffff;
3878 break;
3881 nslist[i++] = nsid;
3882 clear_bit(nsid, n->changed_nsids);
3886      * Clear all remaining changed-namespace bits if we broke out early because
3887      * more than 1024 namespaces have changed.
3889 if (nslist[0] == 0xffffffff) {
3890 bitmap_zero(n->changed_nsids, NVME_CHANGED_NSID_SIZE);
3893 if (!rae) {
3894 nvme_clear_events(n, NVME_AER_TYPE_NOTICE);
3897 return nvme_c2h(n, ((uint8_t *)nslist) + off, trans_len, req);
3900 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
3901 uint64_t off, NvmeRequest *req)
3903 NvmeEffectsLog log = {};
3904 const uint32_t *src_iocs = NULL;
3905 uint32_t trans_len;
3907 if (off >= sizeof(log)) {
3908 trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
3909 return NVME_INVALID_FIELD | NVME_DNR;
3912 switch (NVME_CC_CSS(n->bar.cc)) {
3913 case NVME_CC_CSS_NVM:
3914 src_iocs = nvme_cse_iocs_nvm;
3915 /* fall through */
3916 case NVME_CC_CSS_ADMIN_ONLY:
3917 break;
3918 case NVME_CC_CSS_CSI:
3919 switch (csi) {
3920 case NVME_CSI_NVM:
3921 src_iocs = nvme_cse_iocs_nvm;
3922 break;
3923 case NVME_CSI_ZONED:
3924 src_iocs = nvme_cse_iocs_zoned;
3925 break;
3929 memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));
3931 if (src_iocs) {
3932 memcpy(log.iocs, src_iocs, sizeof(log.iocs));
3935 trans_len = MIN(sizeof(log) - off, buf_len);
3937 return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
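/*
 * Get Log Page. The transfer length is reconstructed from NUMDL/NUMDU
 * (a zero-based number of dwords) and the offset from LPOL/LPOU:
 *
 *     len = ((NUMDU << 16 | NUMDL) + 1) * 4
 *     off = (LPOU << 32) | LPOL
 *
 * The offset must be dword aligned and the length is subject to MDTS.
 */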
3940 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
3942 NvmeCmd *cmd = &req->cmd;
3944 uint32_t dw10 = le32_to_cpu(cmd->cdw10);
3945 uint32_t dw11 = le32_to_cpu(cmd->cdw11);
3946 uint32_t dw12 = le32_to_cpu(cmd->cdw12);
3947 uint32_t dw13 = le32_to_cpu(cmd->cdw13);
3948 uint8_t lid = dw10 & 0xff;
3949 uint8_t lsp = (dw10 >> 8) & 0xf;
3950 uint8_t rae = (dw10 >> 15) & 0x1;
3951 uint8_t csi = le32_to_cpu(cmd->cdw14) >> 24;
3952 uint32_t numdl, numdu;
3953 uint64_t off, lpol, lpou;
3954 size_t len;
3955 uint16_t status;
3957 numdl = (dw10 >> 16);
3958 numdu = (dw11 & 0xffff);
3959 lpol = dw12;
3960 lpou = dw13;
3962 len = (((numdu << 16) | numdl) + 1) << 2;
3963 off = (lpou << 32ULL) | lpol;
3965 if (off & 0x3) {
3966 return NVME_INVALID_FIELD | NVME_DNR;
3969 trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);
3971 status = nvme_check_mdts(n, len);
3972 if (status) {
3973 return status;
3976 switch (lid) {
3977 case NVME_LOG_ERROR_INFO:
3978 return nvme_error_info(n, rae, len, off, req);
3979 case NVME_LOG_SMART_INFO:
3980 return nvme_smart_info(n, rae, len, off, req);
3981 case NVME_LOG_FW_SLOT_INFO:
3982 return nvme_fw_log_info(n, len, off, req);
3983 case NVME_LOG_CHANGED_NSLIST:
3984 return nvme_changed_nslist(n, rae, len, off, req);
3985 case NVME_LOG_CMD_EFFECTS:
3986 return nvme_cmd_effects(n, csi, len, off, req);
3987 default:
3988 trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
3989 return NVME_INVALID_FIELD | NVME_DNR;
3993 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
3995 n->cq[cq->cqid] = NULL;
3996 timer_free(cq->timer);
3997 if (msix_enabled(&n->parent_obj)) {
3998 msix_vector_unuse(&n->parent_obj, cq->vector);
4000 if (cq->cqid) {
4001 g_free(cq);
4005 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
4007 NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
4008 NvmeCQueue *cq;
4009 uint16_t qid = le16_to_cpu(c->qid);
4011 if (unlikely(!qid || nvme_check_cqid(n, qid))) {
4012 trace_pci_nvme_err_invalid_del_cq_cqid(qid);
4013 return NVME_INVALID_CQID | NVME_DNR;
4016 cq = n->cq[qid];
4017 if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
4018 trace_pci_nvme_err_invalid_del_cq_notempty(qid);
4019 return NVME_INVALID_QUEUE_DEL;
4021 nvme_irq_deassert(n, cq);
4022 trace_pci_nvme_del_cq(qid);
4023 nvme_free_cq(cq, n);
4024 return NVME_SUCCESS;
4027 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
4028 uint16_t cqid, uint16_t vector, uint16_t size,
4029 uint16_t irq_enabled)
4031 int ret;
4033 if (msix_enabled(&n->parent_obj)) {
4034 ret = msix_vector_use(&n->parent_obj, vector);
4035 assert(ret == 0);
4037 cq->ctrl = n;
4038 cq->cqid = cqid;
4039 cq->size = size;
4040 cq->dma_addr = dma_addr;
4041 cq->phase = 1;
4042 cq->irq_enabled = irq_enabled;
4043 cq->vector = vector;
4044 cq->head = cq->tail = 0;
4045 QTAILQ_INIT(&cq->req_list);
4046 QTAILQ_INIT(&cq->sq_list);
4047 n->cq[cqid] = cq;
4048 cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
4051 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
4053 NvmeCQueue *cq;
4054 NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
4055 uint16_t cqid = le16_to_cpu(c->cqid);
4056 uint16_t vector = le16_to_cpu(c->irq_vector);
4057 uint16_t qsize = le16_to_cpu(c->qsize);
4058 uint16_t qflags = le16_to_cpu(c->cq_flags);
4059 uint64_t prp1 = le64_to_cpu(c->prp1);
4061 trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
4062 NVME_CQ_FLAGS_IEN(qflags) != 0);
4064 if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
4065 n->cq[cqid] != NULL)) {
4066 trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
4067 return NVME_INVALID_QID | NVME_DNR;
4069 if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
4070 trace_pci_nvme_err_invalid_create_cq_size(qsize);
4071 return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
4073 if (unlikely(prp1 & (n->page_size - 1))) {
4074 trace_pci_nvme_err_invalid_create_cq_addr(prp1);
4075 return NVME_INVALID_PRP_OFFSET | NVME_DNR;
4077 if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
4078 trace_pci_nvme_err_invalid_create_cq_vector(vector);
4079 return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
4081 if (unlikely(vector >= n->params.msix_qsize)) {
4082 trace_pci_nvme_err_invalid_create_cq_vector(vector);
4083 return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
4085 if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
4086 trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
4087 return NVME_INVALID_FIELD | NVME_DNR;
4090 cq = g_malloc0(sizeof(*cq));
4091 nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
4092 NVME_CQ_FLAGS_IEN(qflags));
4095 * It is only required to set qs_created when creating a completion queue;
4096 * creating a submission queue without a matching completion queue will
4097 * fail.
4099 n->qs_created = true;
4100 return NVME_SUCCESS;
4103 static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
4105 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
4107 return nvme_c2h(n, id, sizeof(id), req);
4110 static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
4112 switch (ns->csi) {
4113 case NVME_CSI_NVM:
4114 case NVME_CSI_ZONED:
4115 return true;
4117 return false;
4120 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
4122 trace_pci_nvme_identify_ctrl();
4124 return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
4127 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
4129 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4130 uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
4131 NvmeIdCtrlNvm *id_nvm = (NvmeIdCtrlNvm *)&id;
4133 trace_pci_nvme_identify_ctrl_csi(c->csi);
4135 switch (c->csi) {
4136 case NVME_CSI_NVM:
4137 id_nvm->vsl = n->params.vsl;
4138 id_nvm->dmrsl = cpu_to_le32(n->dmrsl);
4139 break;
4141 case NVME_CSI_ZONED:
4142 ((NvmeIdCtrlZoned *)&id)->zasl = n->params.zasl;
4143 break;
4145 default:
4146 return NVME_INVALID_FIELD | NVME_DNR;
4149 return nvme_c2h(n, id, sizeof(id), req);
4152 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req, bool active)
4154 NvmeNamespace *ns;
4155 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4156 uint32_t nsid = le32_to_cpu(c->nsid);
4158 trace_pci_nvme_identify_ns(nsid);
4160 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4161 return NVME_INVALID_NSID | NVME_DNR;
4164 ns = nvme_ns(n, nsid);
4165 if (unlikely(!ns)) {
4166 if (!active) {
4167 ns = nvme_subsys_ns(n->subsys, nsid);
4168 if (!ns) {
4169 return nvme_rpt_empty_id_struct(n, req);
4171 } else {
4172 return nvme_rpt_empty_id_struct(n, req);
4176 if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
4177 return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
4180 return NVME_INVALID_CMD_SET | NVME_DNR;
4183 static uint16_t nvme_identify_ns_attached_list(NvmeCtrl *n, NvmeRequest *req)
4185 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4186 uint16_t min_id = le16_to_cpu(c->ctrlid);
4187 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
4188 uint16_t *ids = &list[1];
4189 NvmeNamespace *ns;
4190 NvmeCtrl *ctrl;
4191 int cntlid, nr_ids = 0;
4193 trace_pci_nvme_identify_ns_attached_list(min_id);
4195 if (c->nsid == NVME_NSID_BROADCAST) {
4196 return NVME_INVALID_FIELD | NVME_DNR;
4199 ns = nvme_subsys_ns(n->subsys, c->nsid);
4200 if (!ns) {
4201 return NVME_INVALID_FIELD | NVME_DNR;
4204 for (cntlid = min_id; cntlid < ARRAY_SIZE(n->subsys->ctrls); cntlid++) {
4205 ctrl = nvme_subsys_ctrl(n->subsys, cntlid);
4206 if (!ctrl) {
4207 continue;
4210 if (!nvme_ns_is_attached(ctrl, ns)) {
4211 continue;
4214 ids[nr_ids++] = cntlid;
4217 list[0] = nr_ids;
4219 return nvme_c2h(n, (uint8_t *)list, sizeof(list), req);
4222 static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
4223 bool active)
4225 NvmeNamespace *ns;
4226 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4227 uint32_t nsid = le32_to_cpu(c->nsid);
4229 trace_pci_nvme_identify_ns_csi(nsid, c->csi);
4231 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4232 return NVME_INVALID_NSID | NVME_DNR;
4235 ns = nvme_ns(n, nsid);
4236 if (unlikely(!ns)) {
4237 if (!active) {
4238 ns = nvme_subsys_ns(n->subsys, nsid);
4239 if (!ns) {
4240 return nvme_rpt_empty_id_struct(n, req);
4242 } else {
4243 return nvme_rpt_empty_id_struct(n, req);
4247 if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
4248 return nvme_rpt_empty_id_struct(n, req);
4249 } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
4250 return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
4251 req);
4254 return NVME_INVALID_FIELD | NVME_DNR;
4257 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
4258 bool active)
4260 NvmeNamespace *ns;
4261 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4262 uint32_t min_nsid = le32_to_cpu(c->nsid);
4263 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
4264 static const int data_len = sizeof(list);
4265 uint32_t *list_ptr = (uint32_t *)list;
4266 int i, j = 0;
4268 trace_pci_nvme_identify_nslist(min_nsid);
4271 * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
4272 * since the Active Namespace ID List should return namespaces with ids
4273 * *higher* than the NSID specified in the command. This is also specified
4274 * in the spec (NVM Express v1.3d, Section 5.15.4).
4276 if (min_nsid >= NVME_NSID_BROADCAST - 1) {
4277 return NVME_INVALID_NSID | NVME_DNR;
4280 for (i = 1; i <= n->num_namespaces; i++) {
4281 ns = nvme_ns(n, i);
4282 if (!ns) {
4283 if (!active) {
4284 ns = nvme_subsys_ns(n->subsys, i);
4285 if (!ns) {
4286 continue;
4288 } else {
4289 continue;
4292 if (ns->params.nsid <= min_nsid) {
4293 continue;
4295 list_ptr[j++] = cpu_to_le32(ns->params.nsid);
4296 if (j == data_len / sizeof(uint32_t)) {
4297 break;
4301 return nvme_c2h(n, list, data_len, req);
4304 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
4305 bool active)
4307 NvmeNamespace *ns;
4308 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4309 uint32_t min_nsid = le32_to_cpu(c->nsid);
4310 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
4311 static const int data_len = sizeof(list);
4312 uint32_t *list_ptr = (uint32_t *)list;
4313 int i, j = 0;
4315 trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);
4318 * Same as in nvme_identify_nslist(), 0xffffffff/0xfffffffe are invalid.
4320 if (min_nsid >= NVME_NSID_BROADCAST - 1) {
4321 return NVME_INVALID_NSID | NVME_DNR;
4324 if (c->csi != NVME_CSI_NVM && c->csi != NVME_CSI_ZONED) {
4325 return NVME_INVALID_FIELD | NVME_DNR;
4328 for (i = 1; i <= n->num_namespaces; i++) {
4329 ns = nvme_ns(n, i);
4330 if (!ns) {
4331 if (!active) {
4332 ns = nvme_subsys_ns(n->subsys, i);
4333 if (!ns) {
4334 continue;
4336 } else {
4337 continue;
4340 if (ns->params.nsid <= min_nsid || c->csi != ns->csi) {
4341 continue;
4343 list_ptr[j++] = cpu_to_le32(ns->params.nsid);
4344 if (j == data_len / sizeof(uint32_t)) {
4345 break;
4349 return nvme_c2h(n, list, data_len, req);
4352 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
4354 NvmeNamespace *ns;
4355 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4356 uint32_t nsid = le32_to_cpu(c->nsid);
4357 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
4359 struct data {
4360 struct {
4361 NvmeIdNsDescr hdr;
4362 uint8_t v[NVME_NIDL_UUID];
4363 } uuid;
4364 struct {
4365 NvmeIdNsDescr hdr;
4366 uint8_t v;
4367 } csi;
4370 struct data *ns_descrs = (struct data *)list;
4372 trace_pci_nvme_identify_ns_descr_list(nsid);
4374 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4375 return NVME_INVALID_NSID | NVME_DNR;
4378 ns = nvme_ns(n, nsid);
4379 if (unlikely(!ns)) {
4380 return NVME_INVALID_FIELD | NVME_DNR;
4384 * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
4385 * structure, a Namespace UUID (nidt = 0x3) must be reported in the
4386 * Namespace Identification Descriptor. Add the namespace UUID here.
4388 ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
4389 ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID;
4390 memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
4392 ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI;
4393 ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
4394 ns_descrs->csi.v = ns->csi;
4396 return nvme_c2h(n, list, sizeof(list), req);
4399 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
4401 uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
4402 static const int data_len = sizeof(list);
4404 trace_pci_nvme_identify_cmd_set();
4406 NVME_SET_CSI(*list, NVME_CSI_NVM);
4407 NVME_SET_CSI(*list, NVME_CSI_ZONED);
4409 return nvme_c2h(n, list, data_len, req);
4412 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
4414 NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
4416 trace_pci_nvme_identify(nvme_cid(req), c->cns, le16_to_cpu(c->ctrlid),
4417 c->csi);
4419 switch (c->cns) {
4420 case NVME_ID_CNS_NS:
4421 return nvme_identify_ns(n, req, true);
4422 case NVME_ID_CNS_NS_PRESENT:
4423 return nvme_identify_ns(n, req, false);
4424 case NVME_ID_CNS_NS_ATTACHED_CTRL_LIST:
4425 return nvme_identify_ns_attached_list(n, req);
4426 case NVME_ID_CNS_CS_NS:
4427 return nvme_identify_ns_csi(n, req, true);
4428 case NVME_ID_CNS_CS_NS_PRESENT:
4429 return nvme_identify_ns_csi(n, req, false);
4430 case NVME_ID_CNS_CTRL:
4431 return nvme_identify_ctrl(n, req);
4432 case NVME_ID_CNS_CS_CTRL:
4433 return nvme_identify_ctrl_csi(n, req);
4434 case NVME_ID_CNS_NS_ACTIVE_LIST:
4435 return nvme_identify_nslist(n, req, true);
4436 case NVME_ID_CNS_NS_PRESENT_LIST:
4437 return nvme_identify_nslist(n, req, false);
4438 case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
4439 return nvme_identify_nslist_csi(n, req, true);
4440 case NVME_ID_CNS_CS_NS_PRESENT_LIST:
4441 return nvme_identify_nslist_csi(n, req, false);
4442 case NVME_ID_CNS_NS_DESCR_LIST:
4443 return nvme_identify_ns_descr_list(n, req);
4444 case NVME_ID_CNS_IO_COMMAND_SET:
4445 return nvme_identify_cmd_set(n, req);
4446 default:
4447 trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
4448 return NVME_INVALID_FIELD | NVME_DNR;
4452 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
4454 uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
4456 req->cqe.result = 1;
4457 if (nvme_check_sqid(n, sqid)) {
4458 return NVME_INVALID_FIELD | NVME_DNR;
4461 return NVME_SUCCESS;
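/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * nvme_abort() above never aborts anything: it unconditionally sets bit 0
 * of completion Dword 0, which per the spec means "the command specified
 * was not aborted". Seen from a hypothetical host:
 *
 *     uint32_t dw0     = le32_to_cpu(cqe.result);   // cqe: host-side CQE copy
 *     bool     aborted = !(dw0 & 0x1);              // always false here
 */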
4464 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
4466 trace_pci_nvme_setfeat_timestamp(ts);
4468 n->host_timestamp = le64_to_cpu(ts);
4469 n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
4472 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
4474 uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
4475 uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;
4477 union nvme_timestamp {
4478 struct {
4479 uint64_t timestamp:48;
4480 uint64_t sync:1;
4481 uint64_t origin:3;
4482 uint64_t rsvd1:12;
4484 uint64_t all;
4487 union nvme_timestamp ts;
4488 ts.all = 0;
4489 ts.timestamp = n->host_timestamp + elapsed_time;
4491 /* If the host timestamp is non-zero, set the timestamp origin */
4492 ts.origin = n->host_timestamp ? 0x01 : 0x00;
4494 trace_pci_nvme_getfeat_timestamp(ts.all);
4496 return cpu_to_le64(ts.all);
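/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The feature value built above packs a 48-bit millisecond timestamp, a
 * synch bit and a 3-bit origin field into one little-endian qword. With
 * plain shifts instead of the bitfield union:
 *
 *     uint64_t pack_timestamp(uint64_t ms, bool set_by_host)
 *     {
 *         uint64_t v = ms & 0xffffffffffffULL;             // bits 47:0, timestamp
 *                                                          // bit 48 (synch) left 0
 *         v |= (uint64_t)(set_by_host ? 0x1 : 0x0) << 49;  // bits 51:49, origin
 *         return v;                                        // caller cpu_to_le64()s it
 *     }
 *
 * A host timestamp of zero therefore reports origin 000b ("not set"),
 * anything else reports 001b ("set via Set Features").
 */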
4499 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
4501 uint64_t timestamp = nvme_get_timestamp(n);
4503 return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
4506 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
4508 NvmeCmd *cmd = &req->cmd;
4509 uint32_t dw10 = le32_to_cpu(cmd->cdw10);
4510 uint32_t dw11 = le32_to_cpu(cmd->cdw11);
4511 uint32_t nsid = le32_to_cpu(cmd->nsid);
4512 uint32_t result;
4513 uint8_t fid = NVME_GETSETFEAT_FID(dw10);
4514 NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
4515 uint16_t iv;
4516 NvmeNamespace *ns;
4517 int i;
4519 static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
4520 [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
4523 trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);
4525 if (!nvme_feature_support[fid]) {
4526 return NVME_INVALID_FIELD | NVME_DNR;
4529 if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
4530 if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
4532 * The Reservation Notification Mask and Reservation Persistence
4533 * features require a status code of Invalid Field in Command when
4534 * NSID is 0xFFFFFFFF. Since the device does not support those
4535 * features we can always return Invalid Namespace or Format as we
4536 * should do for all other features.
4538 return NVME_INVALID_NSID | NVME_DNR;
4541 if (!nvme_ns(n, nsid)) {
4542 return NVME_INVALID_FIELD | NVME_DNR;
4546 switch (sel) {
4547 case NVME_GETFEAT_SELECT_CURRENT:
4548 break;
4549 case NVME_GETFEAT_SELECT_SAVED:
4550 /* no features are saveable by the controller; fallthrough */
4551 case NVME_GETFEAT_SELECT_DEFAULT:
4552 goto defaults;
4553 case NVME_GETFEAT_SELECT_CAP:
4554 result = nvme_feature_cap[fid];
4555 goto out;
4558 switch (fid) {
4559 case NVME_TEMPERATURE_THRESHOLD:
4560 result = 0;
4563 * The controller only implements the Composite Temperature sensor, so
4564 * return 0 for all other sensors.
4566 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
4567 goto out;
4570 switch (NVME_TEMP_THSEL(dw11)) {
4571 case NVME_TEMP_THSEL_OVER:
4572 result = n->features.temp_thresh_hi;
4573 goto out;
4574 case NVME_TEMP_THSEL_UNDER:
4575 result = n->features.temp_thresh_low;
4576 goto out;
4579 return NVME_INVALID_FIELD | NVME_DNR;
4580 case NVME_ERROR_RECOVERY:
4581 if (!nvme_nsid_valid(n, nsid)) {
4582 return NVME_INVALID_NSID | NVME_DNR;
4585 ns = nvme_ns(n, nsid);
4586 if (unlikely(!ns)) {
4587 return NVME_INVALID_FIELD | NVME_DNR;
4590 result = ns->features.err_rec;
4591 goto out;
4592 case NVME_VOLATILE_WRITE_CACHE:
4593 result = 0;
4594 for (i = 1; i <= n->num_namespaces; i++) {
4595 ns = nvme_ns(n, i);
4596 if (!ns) {
4597 continue;
4600 result = blk_enable_write_cache(ns->blkconf.blk);
4601 if (result) {
4602 break;
4605 trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
4606 goto out;
4607 case NVME_ASYNCHRONOUS_EVENT_CONF:
4608 result = n->features.async_config;
4609 goto out;
4610 case NVME_TIMESTAMP:
4611 return nvme_get_feature_timestamp(n, req);
4612 default:
4613 break;
4616 defaults:
4617 switch (fid) {
4618 case NVME_TEMPERATURE_THRESHOLD:
4619 result = 0;
4621 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
4622 break;
4625 if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
4626 result = NVME_TEMPERATURE_WARNING;
4629 break;
4630 case NVME_NUMBER_OF_QUEUES:
4631 result = (n->params.max_ioqpairs - 1) |
4632 ((n->params.max_ioqpairs - 1) << 16);
4633 trace_pci_nvme_getfeat_numq(result);
4634 break;
4635 case NVME_INTERRUPT_VECTOR_CONF:
4636 iv = dw11 & 0xffff;
4637 if (iv >= n->params.max_ioqpairs + 1) {
4638 return NVME_INVALID_FIELD | NVME_DNR;
4641 result = iv;
4642 if (iv == n->admin_cq.vector) {
4643 result |= NVME_INTVC_NOCOALESCING;
4645 break;
4646 case NVME_COMMAND_SET_PROFILE:
4647 result = 0;
4648 break;
4649 default:
4650 result = nvme_feature_default[fid];
4651 break;
4654 out:
4655 req->cqe.result = cpu_to_le32(result);
4656 return NVME_SUCCESS;
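/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The Number of Queues value returned above is 0's based in both halves.
 * For the default max_ioqpairs = 64 a host would decode it as:
 *
 *     uint32_t dw0  = (64 - 1) | ((64 - 1) << 16);   // as built above
 *     uint16_t nsqa = (dw0 & 0xffff) + 1;            // 64 I/O submission queues
 *     uint16_t ncqa = (dw0 >> 16) + 1;               // 64 I/O completion queues
 */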
4659 static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
4661 uint16_t ret;
4662 uint64_t timestamp;
4664 ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
4665 if (ret) {
4666 return ret;
4669 nvme_set_timestamp(n, timestamp);
4671 return NVME_SUCCESS;
4674 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
4676 NvmeNamespace *ns = NULL;
4678 NvmeCmd *cmd = &req->cmd;
4679 uint32_t dw10 = le32_to_cpu(cmd->cdw10);
4680 uint32_t dw11 = le32_to_cpu(cmd->cdw11);
4681 uint32_t nsid = le32_to_cpu(cmd->nsid);
4682 uint8_t fid = NVME_GETSETFEAT_FID(dw10);
4683 uint8_t save = NVME_SETFEAT_SAVE(dw10);
4684 int i;
4686 trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);
4688 if (save && !(nvme_feature_cap[fid] & NVME_FEAT_CAP_SAVE)) {
4689 return NVME_FID_NOT_SAVEABLE | NVME_DNR;
4692 if (!nvme_feature_support[fid]) {
4693 return NVME_INVALID_FIELD | NVME_DNR;
4696 if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
4697 if (nsid != NVME_NSID_BROADCAST) {
4698 if (!nvme_nsid_valid(n, nsid)) {
4699 return NVME_INVALID_NSID | NVME_DNR;
4702 ns = nvme_ns(n, nsid);
4703 if (unlikely(!ns)) {
4704 return NVME_INVALID_FIELD | NVME_DNR;
4707 } else if (nsid && nsid != NVME_NSID_BROADCAST) {
4708 if (!nvme_nsid_valid(n, nsid)) {
4709 return NVME_INVALID_NSID | NVME_DNR;
4712 return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
4715 if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
4716 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
4719 switch (fid) {
4720 case NVME_TEMPERATURE_THRESHOLD:
4721 if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
4722 break;
4725 switch (NVME_TEMP_THSEL(dw11)) {
4726 case NVME_TEMP_THSEL_OVER:
4727 n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
4728 break;
4729 case NVME_TEMP_THSEL_UNDER:
4730 n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
4731 break;
4732 default:
4733 return NVME_INVALID_FIELD | NVME_DNR;
4736 if ((n->temperature >= n->features.temp_thresh_hi) ||
4737 (n->temperature <= n->features.temp_thresh_low)) {
4738 nvme_smart_event(n, NVME_AER_INFO_SMART_TEMP_THRESH);
4741 break;
4742 case NVME_ERROR_RECOVERY:
4743 if (nsid == NVME_NSID_BROADCAST) {
4744 for (i = 1; i <= n->num_namespaces; i++) {
4745 ns = nvme_ns(n, i);
4747 if (!ns) {
4748 continue;
4751 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
4752 ns->features.err_rec = dw11;
4756 break;
4759 assert(ns);
4760 if (NVME_ID_NS_NSFEAT_DULBE(ns->id_ns.nsfeat)) {
4761 ns->features.err_rec = dw11;
4763 break;
4764 case NVME_VOLATILE_WRITE_CACHE:
4765 for (i = 1; i <= n->num_namespaces; i++) {
4766 ns = nvme_ns(n, i);
4767 if (!ns) {
4768 continue;
4771 if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
4772 blk_flush(ns->blkconf.blk);
4775 blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
4778 break;
4780 case NVME_NUMBER_OF_QUEUES:
4781 if (n->qs_created) {
4782 return NVME_CMD_SEQ_ERROR | NVME_DNR;
4786 * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
4787 * and NSQR.
4789 if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
4790 return NVME_INVALID_FIELD | NVME_DNR;
4793 trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
4794 ((dw11 >> 16) & 0xFFFF) + 1,
4795 n->params.max_ioqpairs,
4796 n->params.max_ioqpairs);
4797 req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
4798 ((n->params.max_ioqpairs - 1) << 16));
4799 break;
4800 case NVME_ASYNCHRONOUS_EVENT_CONF:
4801 n->features.async_config = dw11;
4802 break;
4803 case NVME_TIMESTAMP:
4804 return nvme_set_feature_timestamp(n, req);
4805 case NVME_COMMAND_SET_PROFILE:
4806 if (dw11 & 0x1ff) {
4807 trace_pci_nvme_err_invalid_iocsci(dw11 & 0x1ff);
4808 return NVME_CMD_SET_CMB_REJECTED | NVME_DNR;
4810 break;
4811 default:
4812 return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
4814 return NVME_SUCCESS;
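/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Layout of the Temperature Threshold value parsed above: TMPTH in bits
 * 15:0 (Kelvin), TMPSEL in bits 19:16, THSEL in bits 21:20. Setting an
 * over temperature threshold of 343 K (~70 C, the controller default) on
 * the Composite Temperature sensor would look like:
 *
 *     uint32_t tmpth  = 0x157;                        // 343 K
 *     uint32_t tmpsel = 0x0;                          // composite sensor
 *     uint32_t thsel  = 0x0;                          // over threshold
 *     uint32_t dw11   = tmpth | (tmpsel << 16) | (thsel << 20);
 */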
4817 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
4819 trace_pci_nvme_aer(nvme_cid(req));
4821 if (n->outstanding_aers > n->params.aerl) {
4822 trace_pci_nvme_aer_aerl_exceeded();
4823 return NVME_AER_LIMIT_EXCEEDED;
4826 n->aer_reqs[n->outstanding_aers] = req;
4827 n->outstanding_aers++;
4829 if (!QTAILQ_EMPTY(&n->aer_queue)) {
4830 nvme_process_aers(n);
4833 return NVME_NO_COMPLETE;
4836 static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns);
4837 static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
4839 NvmeNamespace *ns;
4840 NvmeCtrl *ctrl;
4841 uint16_t list[NVME_CONTROLLER_LIST_SIZE] = {};
4842 uint32_t nsid = le32_to_cpu(req->cmd.nsid);
4843 uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
4844 bool attach = !(dw10 & 0xf);
4845 uint16_t *nr_ids = &list[0];
4846 uint16_t *ids = &list[1];
4847 uint16_t ret;
4848 int i;
4850 trace_pci_nvme_ns_attachment(nvme_cid(req), dw10 & 0xf);
4852 ns = nvme_subsys_ns(n->subsys, nsid);
4853 if (!ns) {
4854 return NVME_INVALID_FIELD | NVME_DNR;
4857 ret = nvme_h2c(n, (uint8_t *)list, 4096, req);
4858 if (ret) {
4859 return ret;
4862 if (!*nr_ids) {
4863 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
4866 for (i = 0; i < *nr_ids; i++) {
4867 ctrl = nvme_subsys_ctrl(n->subsys, ids[i]);
4868 if (!ctrl) {
4869 return NVME_NS_CTRL_LIST_INVALID | NVME_DNR;
4872 if (attach) {
4873 if (nvme_ns_is_attached(ctrl, ns)) {
4874 return NVME_NS_ALREADY_ATTACHED | NVME_DNR;
4877 nvme_ns_attach(ctrl, ns);
4878 __nvme_select_ns_iocs(ctrl, ns);
4879 } else {
4880 if (!nvme_ns_is_attached(ctrl, ns)) {
4881 return NVME_NS_NOT_ATTACHED | NVME_DNR;
4884 nvme_ns_detach(ctrl, ns);
4888 * Add namespace id to the changed namespace id list for event clearing
4889 * via Get Log Page command.
4891 if (!test_and_set_bit(nsid, ctrl->changed_nsids)) {
4892 nvme_enqueue_event(ctrl, NVME_AER_TYPE_NOTICE,
4893 NVME_AER_INFO_NOTICE_NS_ATTR_CHANGED,
4894 NVME_LOG_CHANGED_NSLIST);
4898 return NVME_SUCCESS;
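/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The 4096-byte controller list read above is an array of 16-bit entries
 * where entry 0 holds the number of identifiers that follow. Attaching a
 * namespace to the controller with CNTLID 1 would use:
 *
 *     uint16_t ctrl_list[2048] = { 0 };
 *     ctrl_list[0] = 1;        // one controller identifier follows
 *     ctrl_list[1] = 0x0001;   // CNTLID of the target controller
 *     // transferred to the device via the command's data pointer
 */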
4901 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
4903 trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
4904 nvme_adm_opc_str(req->cmd.opcode));
4906 if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
4907 trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
4908 return NVME_INVALID_OPCODE | NVME_DNR;
4911 /* SGLs shall not be used for Admin commands in NVMe over PCIe */
4912 if (NVME_CMD_FLAGS_PSDT(req->cmd.flags) != NVME_PSDT_PRP) {
4913 return NVME_INVALID_FIELD | NVME_DNR;
4916 switch (req->cmd.opcode) {
4917 case NVME_ADM_CMD_DELETE_SQ:
4918 return nvme_del_sq(n, req);
4919 case NVME_ADM_CMD_CREATE_SQ:
4920 return nvme_create_sq(n, req);
4921 case NVME_ADM_CMD_GET_LOG_PAGE:
4922 return nvme_get_log(n, req);
4923 case NVME_ADM_CMD_DELETE_CQ:
4924 return nvme_del_cq(n, req);
4925 case NVME_ADM_CMD_CREATE_CQ:
4926 return nvme_create_cq(n, req);
4927 case NVME_ADM_CMD_IDENTIFY:
4928 return nvme_identify(n, req);
4929 case NVME_ADM_CMD_ABORT:
4930 return nvme_abort(n, req);
4931 case NVME_ADM_CMD_SET_FEATURES:
4932 return nvme_set_feature(n, req);
4933 case NVME_ADM_CMD_GET_FEATURES:
4934 return nvme_get_feature(n, req);
4935 case NVME_ADM_CMD_ASYNC_EV_REQ:
4936 return nvme_aer(n, req);
4937 case NVME_ADM_CMD_NS_ATTACHMENT:
4938 return nvme_ns_attachment(n, req);
4939 default:
4940 assert(false);
4943 return NVME_INVALID_OPCODE | NVME_DNR;
4946 static void nvme_process_sq(void *opaque)
4948 NvmeSQueue *sq = opaque;
4949 NvmeCtrl *n = sq->ctrl;
4950 NvmeCQueue *cq = n->cq[sq->cqid];
4952 uint16_t status;
4953 hwaddr addr;
4954 NvmeCmd cmd;
4955 NvmeRequest *req;
4957 while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
4958 addr = sq->dma_addr + sq->head * n->sqe_size;
4959 if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
4960 trace_pci_nvme_err_addr_read(addr);
4961 trace_pci_nvme_err_cfs();
4962 n->bar.csts = NVME_CSTS_FAILED;
4963 break;
4965 nvme_inc_sq_head(sq);
4967 req = QTAILQ_FIRST(&sq->req_list);
4968 QTAILQ_REMOVE(&sq->req_list, req, entry);
4969 QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
4970 nvme_req_clear(req);
4971 req->cqe.cid = cmd.cid;
4972 memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
4974 status = sq->sqid ? nvme_io_cmd(n, req) :
4975 nvme_admin_cmd(n, req);
4976 if (status != NVME_NO_COMPLETE) {
4977 req->status = status;
4978 nvme_enqueue_req_completion(cq, req);
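/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The fetch address computed above is simply base + head * entry size.
 * With the usual CC.IOSQES = 6 (64-byte entries):
 *
 *     uint64_t dma_addr = 0x100000;                   // hypothetical SQ base
 *     uint32_t sqe_size = 1u << 6;                    // 64 bytes
 *     uint16_t head     = 3;
 *     uint64_t sqe_addr = dma_addr + (uint64_t)head * sqe_size;  // 0x1000c0
 */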
4983 static void nvme_ctrl_reset(NvmeCtrl *n)
4985 NvmeNamespace *ns;
4986 int i;
4988 for (i = 1; i <= n->num_namespaces; i++) {
4989 ns = nvme_ns(n, i);
4990 if (!ns) {
4991 continue;
4994 nvme_ns_drain(ns);
4997 for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
4998 if (n->sq[i] != NULL) {
4999 nvme_free_sq(n->sq[i], n);
5002 for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
5003 if (n->cq[i] != NULL) {
5004 nvme_free_cq(n->cq[i], n);
5008 while (!QTAILQ_EMPTY(&n->aer_queue)) {
5009 NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
5010 QTAILQ_REMOVE(&n->aer_queue, event, entry);
5011 g_free(event);
5014 n->aer_queued = 0;
5015 n->outstanding_aers = 0;
5016 n->qs_created = false;
5018 n->bar.cc = 0;
5021 static void nvme_ctrl_shutdown(NvmeCtrl *n)
5023 NvmeNamespace *ns;
5024 int i;
5026 if (n->pmr.dev) {
5027 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
5030 for (i = 1; i <= n->num_namespaces; i++) {
5031 ns = nvme_ns(n, i);
5032 if (!ns) {
5033 continue;
5036 nvme_ns_shutdown(ns);
5040 static void __nvme_select_ns_iocs(NvmeCtrl *n, NvmeNamespace *ns)
5042 ns->iocs = nvme_cse_iocs_none;
5043 switch (ns->csi) {
5044 case NVME_CSI_NVM:
5045 if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
5046 ns->iocs = nvme_cse_iocs_nvm;
5048 break;
5049 case NVME_CSI_ZONED:
5050 if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_CSI) {
5051 ns->iocs = nvme_cse_iocs_zoned;
5052 } else if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_NVM) {
5053 ns->iocs = nvme_cse_iocs_nvm;
5055 break;
5059 static void nvme_select_ns_iocs(NvmeCtrl *n)
5061 NvmeNamespace *ns;
5062 int i;
5064 for (i = 1; i <= n->num_namespaces; i++) {
5065 ns = nvme_ns(n, i);
5066 if (!ns) {
5067 continue;
5070 __nvme_select_ns_iocs(n, ns);
5074 static int nvme_start_ctrl(NvmeCtrl *n)
5076 uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
5077 uint32_t page_size = 1 << page_bits;
5079 if (unlikely(n->cq[0])) {
5080 trace_pci_nvme_err_startfail_cq();
5081 return -1;
5083 if (unlikely(n->sq[0])) {
5084 trace_pci_nvme_err_startfail_sq();
5085 return -1;
5087 if (unlikely(!n->bar.asq)) {
5088 trace_pci_nvme_err_startfail_nbarasq();
5089 return -1;
5091 if (unlikely(!n->bar.acq)) {
5092 trace_pci_nvme_err_startfail_nbaracq();
5093 return -1;
5095 if (unlikely(n->bar.asq & (page_size - 1))) {
5096 trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
5097 return -1;
5099 if (unlikely(n->bar.acq & (page_size - 1))) {
5100 trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
5101 return -1;
5103 if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
5104 trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
5105 return -1;
5107 if (unlikely(NVME_CC_MPS(n->bar.cc) <
5108 NVME_CAP_MPSMIN(n->bar.cap))) {
5109 trace_pci_nvme_err_startfail_page_too_small(
5110 NVME_CC_MPS(n->bar.cc),
5111 NVME_CAP_MPSMIN(n->bar.cap));
5112 return -1;
5114 if (unlikely(NVME_CC_MPS(n->bar.cc) >
5115 NVME_CAP_MPSMAX(n->bar.cap))) {
5116 trace_pci_nvme_err_startfail_page_too_large(
5117 NVME_CC_MPS(n->bar.cc),
5118 NVME_CAP_MPSMAX(n->bar.cap));
5119 return -1;
5121 if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
5122 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
5123 trace_pci_nvme_err_startfail_cqent_too_small(
5124 NVME_CC_IOCQES(n->bar.cc),
5125 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
5126 return -1;
5128 if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
5129 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
5130 trace_pci_nvme_err_startfail_cqent_too_large(
5131 NVME_CC_IOCQES(n->bar.cc),
5132 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
5133 return -1;
5135 if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
5136 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
5137 trace_pci_nvme_err_startfail_sqent_too_small(
5138 NVME_CC_IOSQES(n->bar.cc),
5139 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
5140 return -1;
5142 if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
5143 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
5144 trace_pci_nvme_err_startfail_sqent_too_large(
5145 NVME_CC_IOSQES(n->bar.cc),
5146 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
5147 return -1;
5149 if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
5150 trace_pci_nvme_err_startfail_asqent_sz_zero();
5151 return -1;
5153 if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
5154 trace_pci_nvme_err_startfail_acqent_sz_zero();
5155 return -1;
5158 n->page_bits = page_bits;
5159 n->page_size = page_size;
5160 n->max_prp_ents = n->page_size / sizeof(uint64_t);
5161 n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
5162 n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
5163 nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
5164 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
5165 nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
5166 NVME_AQA_ASQS(n->bar.aqa) + 1);
5168 nvme_set_timestamp(n, 0ULL);
5170 QTAILQ_INIT(&n->aer_queue);
5172 nvme_select_ns_iocs(n);
5174 return 0;
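/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The page size derived above follows CC.MPS: page size = 2^(12 + MPS),
 * bounded by CAP.MPSMIN/CAP.MPSMAX (checked above). For the common MPS = 0:
 *
 *     uint32_t mps       = 0;
 *     uint32_t page_bits = mps + 12;                       // 12
 *     uint32_t page_size = 1u << page_bits;                // 4096 bytes
 *     uint32_t prp_ents  = page_size / sizeof(uint64_t);   // 512 PRPs per list
 */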
5177 static void nvme_cmb_enable_regs(NvmeCtrl *n)
5179 NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
5180 NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
5181 NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
5183 NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
5184 NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
5185 NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
5186 NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
5187 NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
5188 NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
5189 NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
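/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * CMBSZ.SZU selects the size unit as 4 KiB * 16^SZU, so the SZU = 2 set
 * above advertises the CMB size in 1 MiB units:
 *
 *     uint64_t szu_bytes = 1ull << (12 + 4 * 2);      // 1 MiB per unit
 *     uint64_t cmb_bytes = szu_bytes * n->params.cmb_size_mb;
 */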
5192 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
5193 unsigned size)
5195 if (unlikely(offset & (sizeof(uint32_t) - 1))) {
5196 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
5197 "MMIO write not 32-bit aligned,"
5198 " offset=0x%"PRIx64"", offset);
5199 /* should be ignored, fall through for now */
5202 if (unlikely(size < sizeof(uint32_t))) {
5203 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
5204 "MMIO write smaller than 32-bits,"
5205 " offset=0x%"PRIx64", size=%u",
5206 offset, size);
5207 /* should be ignored, fall through for now */
5210 switch (offset) {
5211 case 0xc: /* INTMS */
5212 if (unlikely(msix_enabled(&(n->parent_obj)))) {
5213 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
5214 "undefined access to interrupt mask set"
5215 " when MSI-X is enabled");
5216 /* should be ignored, fall through for now */
5218 n->bar.intms |= data & 0xffffffff;
5219 n->bar.intmc = n->bar.intms;
5220 trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
5221 nvme_irq_check(n);
5222 break;
5223 case 0x10: /* INTMC */
5224 if (unlikely(msix_enabled(&(n->parent_obj)))) {
5225 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
5226 "undefined access to interrupt mask clr"
5227 " when MSI-X is enabled");
5228 /* should be ignored, fall through for now */
5230 n->bar.intms &= ~(data & 0xffffffff);
5231 n->bar.intmc = n->bar.intms;
5232 trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
5233 nvme_irq_check(n);
5234 break;
5235 case 0x14: /* CC */
5236 trace_pci_nvme_mmio_cfg(data & 0xffffffff);
5237 /* Windows first sends data, then sends enable bit */
5238 if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
5239 !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
5241 n->bar.cc = data;
5244 if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
5245 n->bar.cc = data;
5246 if (unlikely(nvme_start_ctrl(n))) {
5247 trace_pci_nvme_err_startfail();
5248 n->bar.csts = NVME_CSTS_FAILED;
5249 } else {
5250 trace_pci_nvme_mmio_start_success();
5251 n->bar.csts = NVME_CSTS_READY;
5253 } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
5254 trace_pci_nvme_mmio_stopped();
5255 nvme_ctrl_reset(n);
5256 n->bar.csts &= ~NVME_CSTS_READY;
5258 if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
5259 trace_pci_nvme_mmio_shutdown_set();
5260 nvme_ctrl_shutdown(n);
5261 n->bar.cc = data;
5262 n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
5263 } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
5264 trace_pci_nvme_mmio_shutdown_cleared();
5265 n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
5266 n->bar.cc = data;
5268 break;
5269 case 0x1C: /* CSTS */
5270 if (data & (1 << 4)) {
5271 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
5272 "attempted to W1C CSTS.NSSRO"
5273 " but CAP.NSSRS is zero (not supported)");
5274 } else if (data != 0) {
5275 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
5276 "attempted to set a read only bit"
5277 " of controller status");
5279 break;
5280 case 0x20: /* NSSR */
5281 if (data == 0x4E564D65) {
5282 trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
5283 } else {
5284 /* The spec says that writes of other values have no effect */
5285 return;
5287 break;
5288 case 0x24: /* AQA */
5289 n->bar.aqa = data & 0xffffffff;
5290 trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
5291 break;
5292 case 0x28: /* ASQ */
5293 n->bar.asq = size == 8 ? data :
5294 (n->bar.asq & ~0xffffffffULL) | (data & 0xffffffff);
5295 trace_pci_nvme_mmio_asqaddr(data);
5296 break;
5297 case 0x2c: /* ASQ hi */
5298 n->bar.asq = (n->bar.asq & 0xffffffff) | (data << 32);
5299 trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
5300 break;
5301 case 0x30: /* ACQ */
5302 trace_pci_nvme_mmio_acqaddr(data);
5303 n->bar.acq = size == 8 ? data :
5304 (n->bar.acq & ~0xffffffffULL) | (data & 0xffffffff);
5305 break;
5306 case 0x34: /* ACQ hi */
5307 n->bar.acq = (n->bar.acq & 0xffffffff) | (data << 32);
5308 trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
5309 break;
5310 case 0x38: /* CMBLOC */
5311 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
5312 "invalid write to reserved CMBLOC"
5313 " when CMBSZ is zero, ignored");
5314 return;
5315 case 0x3C: /* CMBSZ */
5316 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
5317 "invalid write to read only CMBSZ, ignored");
5318 return;
5319 case 0x50: /* CMBMSC */
5320 if (!NVME_CAP_CMBS(n->bar.cap)) {
5321 return;
5324 n->bar.cmbmsc = size == 8 ? data :
5325 (n->bar.cmbmsc & ~0xffffffff) | (data & 0xffffffff);
5326 n->cmb.cmse = false;
5328 if (NVME_CMBMSC_CRE(data)) {
5329 nvme_cmb_enable_regs(n);
5331 if (NVME_CMBMSC_CMSE(data)) {
5332 hwaddr cba = NVME_CMBMSC_CBA(data) << CMBMSC_CBA_SHIFT;
5333 if (cba + int128_get64(n->cmb.mem.size) < cba) {
5334 NVME_CMBSTS_SET_CBAI(n->bar.cmbsts, 1);
5335 return;
5338 n->cmb.cba = cba;
5339 n->cmb.cmse = true;
5341 } else {
5342 n->bar.cmbsz = 0;
5343 n->bar.cmbloc = 0;
5346 return;
5347 case 0x54: /* CMBMSC hi */
5348 n->bar.cmbmsc = (n->bar.cmbmsc & 0xffffffff) | (data << 32);
5349 return;
5351 case 0xE00: /* PMRCAP */
5352 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
5353 "invalid write to PMRCAP register, ignored");
5354 return;
5355 case 0xE04: /* PMRCTL */
5356 n->bar.pmrctl = data;
5357 if (NVME_PMRCTL_EN(data)) {
5358 memory_region_set_enabled(&n->pmr.dev->mr, true);
5359 n->bar.pmrsts = 0;
5360 } else {
5361 memory_region_set_enabled(&n->pmr.dev->mr, false);
5362 NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 1);
5363 n->pmr.cmse = false;
5365 return;
5366 case 0xE08: /* PMRSTS */
5367 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
5368 "invalid write to PMRSTS register, ignored");
5369 return;
5370 case 0xE0C: /* PMREBS */
5371 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
5372 "invalid write to PMREBS register, ignored");
5373 return;
5374 case 0xE10: /* PMRSWTP */
5375 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
5376 "invalid write to PMRSWTP register, ignored");
5377 return;
5378 case 0xE14: /* PMRMSCL */
5379 if (!NVME_CAP_PMRS(n->bar.cap)) {
5380 return;
5383 n->bar.pmrmsc = (n->bar.pmrmsc & ~0xffffffff) | (data & 0xffffffff);
5384 n->pmr.cmse = false;
5386 if (NVME_PMRMSC_CMSE(n->bar.pmrmsc)) {
5387 hwaddr cba = NVME_PMRMSC_CBA(n->bar.pmrmsc) << PMRMSC_CBA_SHIFT;
5388 if (cba + int128_get64(n->pmr.dev->mr.size) < cba) {
5389 NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 1);
5390 return;
5393 n->pmr.cmse = true;
5394 n->pmr.cba = cba;
5397 return;
5398 case 0xE18: /* PMRMSCU */
5399 if (!NVME_CAP_PMRS(n->bar.cap)) {
5400 return;
5403 n->bar.pmrmsc = (n->bar.pmrmsc & 0xffffffff) | (data << 32);
5404 return;
5405 default:
5406 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
5407 "invalid MMIO write,"
5408 " offset=0x%"PRIx64", data=%"PRIx64"",
5409 offset, data);
5410 break;
5414 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
5416 NvmeCtrl *n = (NvmeCtrl *)opaque;
5417 uint8_t *ptr = (uint8_t *)&n->bar;
5418 uint64_t val = 0;
5420 trace_pci_nvme_mmio_read(addr, size);
5422 if (unlikely(addr & (sizeof(uint32_t) - 1))) {
5423 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
5424 "MMIO read not 32-bit aligned,"
5425 " offset=0x%"PRIx64"", addr);
5426 /* should RAZ, fall through for now */
5427 } else if (unlikely(size < sizeof(uint32_t))) {
5428 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
5429 "MMIO read smaller than 32-bits,"
5430 " offset=0x%"PRIx64"", addr);
5431 /* should RAZ, fall through for now */
5434 if (addr < sizeof(n->bar)) {
5436 * When PMRWBM bit 1 is set, a read from
5437 * PMRSTS should ensure that prior writes
5438 * made it to persistent media
5440 if (addr == 0xE08 &&
5441 (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
5442 memory_region_msync(&n->pmr.dev->mr, 0, n->pmr.dev->size);
5444 memcpy(&val, ptr + addr, size);
5445 } else {
5446 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
5447 "MMIO read beyond last register,"
5448 " offset=0x%"PRIx64", returning 0", addr);
5451 return val;
5454 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
5456 uint32_t qid;
5458 if (unlikely(addr & ((1 << 2) - 1))) {
5459 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
5460 "doorbell write not 32-bit aligned,"
5461 " offset=0x%"PRIx64", ignoring", addr);
5462 return;
5465 if (((addr - 0x1000) >> 2) & 1) {
5466 /* Completion queue doorbell write */
5468 uint16_t new_head = val & 0xffff;
5469 int start_sqs;
5470 NvmeCQueue *cq;
5472 qid = (addr - (0x1000 + (1 << 2))) >> 3;
5473 if (unlikely(nvme_check_cqid(n, qid))) {
5474 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
5475 "completion queue doorbell write"
5476 " for nonexistent queue,"
5477 " sqid=%"PRIu32", ignoring", qid);
5480 * NVM Express v1.3d, Section 4.1 states: "If host software writes
5481 * an invalid value to the Submission Queue Tail Doorbell or
5482 * Completion Queue Head Doorbell register and an Asynchronous Event
5483 * Request command is outstanding, then an asynchronous event is
5484 * posted to the Admin Completion Queue with a status code of
5485 * Invalid Doorbell Write Value."
5487 * Also note that the spec includes the "Invalid Doorbell Register"
5488 * status code, but nowhere does it specify when to use it.
5489 * However, it seems reasonable to use it here in a similar
5490 * fashion.
5492 if (n->outstanding_aers) {
5493 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
5494 NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
5495 NVME_LOG_ERROR_INFO);
5498 return;
5501 cq = n->cq[qid];
5502 if (unlikely(new_head >= cq->size)) {
5503 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
5504 "completion queue doorbell write value"
5505 " beyond queue size, sqid=%"PRIu32","
5506 " new_head=%"PRIu16", ignoring",
5507 qid, new_head);
5509 if (n->outstanding_aers) {
5510 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
5511 NVME_AER_INFO_ERR_INVALID_DB_VALUE,
5512 NVME_LOG_ERROR_INFO);
5515 return;
5518 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
5520 start_sqs = nvme_cq_full(cq) ? 1 : 0;
5521 cq->head = new_head;
5522 if (start_sqs) {
5523 NvmeSQueue *sq;
5524 QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
5525 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
5527 timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
5530 if (cq->tail == cq->head) {
5531 nvme_irq_deassert(n, cq);
5533 } else {
5534 /* Submission queue doorbell write */
5536 uint16_t new_tail = val & 0xffff;
5537 NvmeSQueue *sq;
5539 qid = (addr - 0x1000) >> 3;
5540 if (unlikely(nvme_check_sqid(n, qid))) {
5541 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
5542 "submission queue doorbell write"
5543 " for nonexistent queue,"
5544 " sqid=%"PRIu32", ignoring", qid);
5546 if (n->outstanding_aers) {
5547 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
5548 NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
5549 NVME_LOG_ERROR_INFO);
5552 return;
5555 sq = n->sq[qid];
5556 if (unlikely(new_tail >= sq->size)) {
5557 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
5558 "submission queue doorbell write value"
5559 " beyond queue size, sqid=%"PRIu32","
5560 " new_tail=%"PRIu16", ignoring",
5561 qid, new_tail);
5563 if (n->outstanding_aers) {
5564 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
5565 NVME_AER_INFO_ERR_INVALID_DB_VALUE,
5566 NVME_LOG_ERROR_INFO);
5569 return;
5572 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
5574 sq->tail = new_tail;
5575 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
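/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * With CAP.DSTRD = 0 the doorbells start at BAR0 offset 0x1000 with a
 * 4-byte stride, alternating SQ tail / CQ head:
 *
 *     sq_tail_db(qid) = 0x1000 + (2 * qid)     * 4;
 *     cq_head_db(qid) = 0x1000 + (2 * qid + 1) * 4;
 *
 * which the decode above inverts, e.g. for a write to offset 0x1008:
 *
 *     hwaddr   addr  = 0x1008;
 *     bool     is_cq = ((addr - 0x1000) >> 2) & 1;    // 0 -> SQ doorbell
 *     uint32_t qid   = (addr - 0x1000) >> 3;          // SQ 1
 */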
5579 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
5580 unsigned size)
5582 NvmeCtrl *n = (NvmeCtrl *)opaque;
5584 trace_pci_nvme_mmio_write(addr, data, size);
5586 if (addr < sizeof(n->bar)) {
5587 nvme_write_bar(n, addr, data, size);
5588 } else {
5589 nvme_process_db(n, addr, data);
5593 static const MemoryRegionOps nvme_mmio_ops = {
5594 .read = nvme_mmio_read,
5595 .write = nvme_mmio_write,
5596 .endianness = DEVICE_LITTLE_ENDIAN,
5597 .impl = {
5598 .min_access_size = 2,
5599 .max_access_size = 8,
5603 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
5604 unsigned size)
5606 NvmeCtrl *n = (NvmeCtrl *)opaque;
5607 stn_le_p(&n->cmb.buf[addr], size, data);
5610 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
5612 NvmeCtrl *n = (NvmeCtrl *)opaque;
5613 return ldn_le_p(&n->cmb.buf[addr], size);
5616 static const MemoryRegionOps nvme_cmb_ops = {
5617 .read = nvme_cmb_read,
5618 .write = nvme_cmb_write,
5619 .endianness = DEVICE_LITTLE_ENDIAN,
5620 .impl = {
5621 .min_access_size = 1,
5622 .max_access_size = 8,
5626 static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
5628 NvmeParams *params = &n->params;
5630 if (params->num_queues) {
5631 warn_report("num_queues is deprecated; please use max_ioqpairs "
5632 "instead");
5634 params->max_ioqpairs = params->num_queues - 1;
5637 if (n->conf.blk) {
5638 warn_report("drive property is deprecated; "
5639 "please use an nvme-ns device instead");
5642 if (params->max_ioqpairs < 1 ||
5643 params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
5644 error_setg(errp, "max_ioqpairs must be between 1 and %d",
5645 NVME_MAX_IOQPAIRS);
5646 return;
5649 if (params->msix_qsize < 1 ||
5650 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
5651 error_setg(errp, "msix_qsize must be between 1 and %d",
5652 PCI_MSIX_FLAGS_QSIZE + 1);
5653 return;
5656 if (!params->serial) {
5657 error_setg(errp, "serial property not set");
5658 return;
5661 if (n->pmr.dev) {
5662 if (host_memory_backend_is_mapped(n->pmr.dev)) {
5663 error_setg(errp, "can't use already busy memdev: %s",
5664 object_get_canonical_path_component(OBJECT(n->pmr.dev)));
5665 return;
5668 if (!is_power_of_2(n->pmr.dev->size)) {
5669 error_setg(errp, "pmr backend size needs to be a power of 2");
5670 return;
5673 host_memory_backend_set_mapped(n->pmr.dev, true);
5676 if (n->params.zasl > n->params.mdts) {
5677 error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less "
5678 "than or equal to mdts (Maximum Data Transfer Size)");
5679 return;
5682 if (!n->params.vsl) {
5683 error_setg(errp, "vsl must be non-zero");
5684 return;
5688 static void nvme_init_state(NvmeCtrl *n)
5690 n->num_namespaces = NVME_MAX_NAMESPACES;
5691 /* add one to max_ioqpairs to account for the admin queue pair */
5692 n->reg_size = pow2ceil(sizeof(NvmeBar) +
5693 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
5694 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
5695 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
5696 n->temperature = NVME_TEMPERATURE;
5697 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
5698 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
5699 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
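/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * reg_size above covers the register file plus one 4-byte submission and
 * one 4-byte completion doorbell per queue pair, admin pair included.
 * For the default max_ioqpairs = 64:
 *
 *     size_t doorbells = 2 * (64 + 1) * 4;            // 520 bytes
 *     size_t reg_size  = pow2ceil(sizeof(NvmeBar) + doorbells);
 *
 * pow2ceil() rounds the sum up to the next power of two.
 */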
5702 static int nvme_attach_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
5704 if (nvme_ns_is_attached(n, ns)) {
5705 error_setg(errp,
5706 "namespace %d is already attached to controller %d",
5707 nvme_nsid(ns), n->cntlid);
5708 return -1;
5711 nvme_ns_attach(n, ns);
5713 return 0;
5716 int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
5718 uint32_t nsid = nvme_nsid(ns);
5720 if (nsid > NVME_MAX_NAMESPACES) {
5721 error_setg(errp, "invalid namespace id (must be between 0 and %d)",
5722 NVME_MAX_NAMESPACES);
5723 return -1;
5726 if (!nsid) {
5727 for (int i = 1; i <= n->num_namespaces; i++) {
5728 if (!nvme_ns(n, i)) {
5729 nsid = ns->params.nsid = i;
5730 break;
5734 if (!nsid) {
5735 error_setg(errp, "no free namespace id");
5736 return -1;
5738 } else {
5739 if (n->namespaces[nsid - 1]) {
5740 error_setg(errp, "namespace id '%d' is already in use", nsid);
5741 return -1;
5745 trace_pci_nvme_register_namespace(nsid);
5748 * If subsys is not given, the namespace is always attached to the controller
5749 * because there's no subsystem to manage namespace allocation.
5751 if (!n->subsys) {
5752 if (ns->params.detached) {
5753 error_setg(errp,
5754 "detached needs nvme-subsys specified nvme or nvme-ns");
5755 return -1;
5758 return nvme_attach_namespace(n, ns, errp);
5759 } else {
5760 if (!ns->params.detached) {
5761 return nvme_attach_namespace(n, ns, errp);
5765 n->dmrsl = MIN_NON_ZERO(n->dmrsl,
5766 BDRV_REQUEST_MAX_BYTES / nvme_l2b(ns, 1));
5768 return 0;
5771 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
5773 uint64_t cmb_size = n->params.cmb_size_mb * MiB;
5775 n->cmb.buf = g_malloc0(cmb_size);
5776 memory_region_init_io(&n->cmb.mem, OBJECT(n), &nvme_cmb_ops, n,
5777 "nvme-cmb", cmb_size);
5778 pci_register_bar(pci_dev, NVME_CMB_BIR,
5779 PCI_BASE_ADDRESS_SPACE_MEMORY |
5780 PCI_BASE_ADDRESS_MEM_TYPE_64 |
5781 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->cmb.mem);
5783 NVME_CAP_SET_CMBS(n->bar.cap, 1);
5785 if (n->params.legacy_cmb) {
5786 nvme_cmb_enable_regs(n);
5787 n->cmb.cmse = true;
5791 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
5793 NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 1);
5794 NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 1);
5795 NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
5796 /* Turn on bit 1 support */
5797 NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
5798 NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 1);
5800 pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
5801 PCI_BASE_ADDRESS_SPACE_MEMORY |
5802 PCI_BASE_ADDRESS_MEM_TYPE_64 |
5803 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmr.dev->mr);
5805 memory_region_set_enabled(&n->pmr.dev->mr, false);
5808 static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
5810 uint8_t *pci_conf = pci_dev->config;
5811 uint64_t bar_size, msix_table_size, msix_pba_size;
5812 unsigned msix_table_offset, msix_pba_offset;
5813 int ret;
5815 Error *err = NULL;
5817 pci_conf[PCI_INTERRUPT_PIN] = 1;
5818 pci_config_set_prog_interface(pci_conf, 0x2);
5820 if (n->params.use_intel_id) {
5821 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
5822 pci_config_set_device_id(pci_conf, 0x5845);
5823 } else {
5824 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
5825 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
5828 pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
5829 pcie_endpoint_cap_init(pci_dev, 0x80);
5831 bar_size = QEMU_ALIGN_UP(n->reg_size, 4 * KiB);
5832 msix_table_offset = bar_size;
5833 msix_table_size = PCI_MSIX_ENTRY_SIZE * n->params.msix_qsize;
5835 bar_size += msix_table_size;
5836 bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
5837 msix_pba_offset = bar_size;
5838 msix_pba_size = QEMU_ALIGN_UP(n->params.msix_qsize, 64) / 8;
5840 bar_size += msix_pba_size;
5841 bar_size = pow2ceil(bar_size);
5843 memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
5844 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
5845 n->reg_size);
5846 memory_region_add_subregion(&n->bar0, 0, &n->iomem);
5848 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
5849 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
5850 ret = msix_init(pci_dev, n->params.msix_qsize,
5851 &n->bar0, 0, msix_table_offset,
5852 &n->bar0, 0, msix_pba_offset, 0, &err);
5853 if (ret < 0) {
5854 if (ret == -ENOTSUP) {
5855 warn_report_err(err);
5856 } else {
5857 error_propagate(errp, err);
5858 return ret;
5862 if (n->params.cmb_size_mb) {
5863 nvme_init_cmb(n, pci_dev);
5866 if (n->pmr.dev) {
5867 nvme_init_pmr(n, pci_dev);
5870 return 0;
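/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * Resulting BAR0 layout (offsets depend on reg_size, so they are kept
 * symbolic); for the default msix_qsize = 65:
 *
 *     [0, reg_size)        controller registers and doorbells
 *     [msix_table_offset)  MSI-X table, 65 * PCI_MSIX_ENTRY_SIZE = 1040
 *                          bytes, placed at the next 4 KiB boundary
 *     [msix_pba_offset)    MSI-X PBA, QEMU_ALIGN_UP(65, 64) / 8 = 16 bytes
 *
 * The total is then rounded with pow2ceil() and exposed as a 64-bit
 * memory BAR.
 */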
5873 static void nvme_init_subnqn(NvmeCtrl *n)
5875 NvmeSubsystem *subsys = n->subsys;
5876 NvmeIdCtrl *id = &n->id_ctrl;
5878 if (!subsys) {
5879 snprintf((char *)id->subnqn, sizeof(id->subnqn),
5880 "nqn.2019-08.org.qemu:%s", n->params.serial);
5881 } else {
5882 pstrcpy((char *)id->subnqn, sizeof(id->subnqn), (char*)subsys->subnqn);
5886 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
5888 NvmeIdCtrl *id = &n->id_ctrl;
5889 uint8_t *pci_conf = pci_dev->config;
5891 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
5892 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
5893 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
5894 strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
5895 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
5897 id->cntlid = cpu_to_le16(n->cntlid);
5899 id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR);
5901 id->rab = 6;
5903 if (n->params.use_intel_id) {
5904 id->ieee[0] = 0xb3;
5905 id->ieee[1] = 0x02;
5906 id->ieee[2] = 0x00;
5907 } else {
5908 id->ieee[0] = 0x00;
5909 id->ieee[1] = 0x54;
5910 id->ieee[2] = 0x52;
5913 id->mdts = n->params.mdts;
5914 id->ver = cpu_to_le32(NVME_SPEC_VER);
5915 id->oacs = cpu_to_le16(NVME_OACS_NS_MGMT);
5916 id->cntrltype = 0x1;
5919 * Because the controller always completes the Abort command immediately,
5920 * there can never be more than one concurrently executing Abort command,
5921 * so this value is never used for anything. Note that there can easily be
5922 * many Abort commands in the queues, but they are not considered
5923 * "executing" until processed by nvme_abort.
5925 * The specification recommends a value of 3 for Abort Command Limit (four
5926 * concurrently outstanding Abort commands), so let's use that, though it is
5927 * inconsequential.
5929 id->acl = 3;
5930 id->aerl = n->params.aerl;
5931 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
5932 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;
5934 /* recommended default value (~70 C) */
5935 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
5936 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);
5938 id->sqes = (0x6 << 4) | 0x6;
5939 id->cqes = (0x4 << 4) | 0x4;
5940 id->nn = cpu_to_le32(n->num_namespaces);
5941 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
5942 NVME_ONCS_FEATURES | NVME_ONCS_DSM |
5943 NVME_ONCS_COMPARE | NVME_ONCS_COPY);
5946 * NOTE: If this device ever supports a command set that does NOT use 0x0
5947 * as a Flush-equivalent operation, support for the broadcast NSID in Flush
5948 * should probably be removed.
5950 * See comment in nvme_io_cmd.
5952 id->vwc = NVME_VWC_NSID_BROADCAST_SUPPORT | NVME_VWC_PRESENT;
5954 id->ocfs = cpu_to_le16(NVME_OCFS_COPY_FORMAT_0);
5955 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
5956 NVME_CTRL_SGLS_BITBUCKET);
5958 nvme_init_subnqn(n);
5960 id->psd[0].mp = cpu_to_le16(0x9c4);
5961 id->psd[0].enlat = cpu_to_le32(0x10);
5962 id->psd[0].exlat = cpu_to_le32(0x4);
5964 if (n->subsys) {
5965 id->cmic |= NVME_CMIC_MULTI_CTRL;
5968 NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
5969 NVME_CAP_SET_CQR(n->bar.cap, 1);
5970 NVME_CAP_SET_TO(n->bar.cap, 0xf);
5971 NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
5972 NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_CSI_SUPP);
5973 NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
5974 NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
5975 NVME_CAP_SET_CMBS(n->bar.cap, n->params.cmb_size_mb ? 1 : 0);
5976 NVME_CAP_SET_PMRS(n->bar.cap, n->pmr.dev ? 1 : 0);
5978 n->bar.vs = NVME_SPEC_VER;
5979 n->bar.intmc = n->bar.intms = 0;
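/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * SQES/CQES above encode log2 of the queue entry size, minimum in the low
 * nibble and maximum in the high nibble:
 *
 *     uint8_t sqes = (0x6 << 4) | 0x6;   // min == max == 2^6 = 64-byte SQEs
 *     uint8_t cqes = (0x4 << 4) | 0x4;   // min == max == 2^4 = 16-byte CQEs
 */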
5982 static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
5984 int cntlid;
5986 if (!n->subsys) {
5987 return 0;
5990 cntlid = nvme_subsys_register_ctrl(n, errp);
5991 if (cntlid < 0) {
5992 return -1;
5995 n->cntlid = cntlid;
5997 return 0;
6000 static void nvme_realize(PCIDevice *pci_dev, Error **errp)
6002 NvmeCtrl *n = NVME(pci_dev);
6003 NvmeNamespace *ns;
6004 Error *local_err = NULL;
6006 nvme_check_constraints(n, &local_err);
6007 if (local_err) {
6008 error_propagate(errp, local_err);
6009 return;
6012 qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
6013 &pci_dev->qdev, n->parent_obj.qdev.id);
6015 nvme_init_state(n);
6016 if (nvme_init_pci(n, pci_dev, errp)) {
6017 return;
6020 if (nvme_init_subsys(n, errp)) {
6021 error_propagate(errp, local_err);
6022 return;
6024 nvme_init_ctrl(n, pci_dev);
6026 /* set up a namespace if the controller drive property was given */
6027 if (n->namespace.blkconf.blk) {
6028 ns = &n->namespace;
6029 ns->params.nsid = 1;
6031 if (nvme_ns_setup(ns, errp)) {
6032 return;
6035 if (nvme_register_namespace(n, ns, errp)) {
6036 return;
6041 static void nvme_exit(PCIDevice *pci_dev)
6043 NvmeCtrl *n = NVME(pci_dev);
6044 NvmeNamespace *ns;
6045 int i;
6047 nvme_ctrl_reset(n);
6049 for (i = 1; i <= n->num_namespaces; i++) {
6050 ns = nvme_ns(n, i);
6051 if (!ns) {
6052 continue;
6055 nvme_ns_cleanup(ns);
6058 g_free(n->cq);
6059 g_free(n->sq);
6060 g_free(n->aer_reqs);
6062 if (n->params.cmb_size_mb) {
6063 g_free(n->cmb.buf);
6066 if (n->pmr.dev) {
6067 host_memory_backend_set_mapped(n->pmr.dev, false);
6069 msix_uninit_exclusive_bar(pci_dev);
6072 static Property nvme_props[] = {
6073 DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
6074 DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmr.dev, TYPE_MEMORY_BACKEND,
6075 HostMemoryBackend *),
6076 DEFINE_PROP_LINK("subsys", NvmeCtrl, subsys, TYPE_NVME_SUBSYS,
6077 NvmeSubsystem *),
6078 DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
6079 DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
6080 DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
6081 DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
6082 DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
6083 DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
6084 DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
6085 DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
6086 DEFINE_PROP_UINT8("vsl", NvmeCtrl, params.vsl, 7),
6087 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
6088 DEFINE_PROP_BOOL("legacy-cmb", NvmeCtrl, params.legacy_cmb, false),
6089 DEFINE_PROP_UINT8("zoned.zasl", NvmeCtrl, params.zasl, 0),
6090 DEFINE_PROP_END_OF_LIST(),
6093 static void nvme_get_smart_warning(Object *obj, Visitor *v, const char *name,
6094 void *opaque, Error **errp)
6096 NvmeCtrl *n = NVME(obj);
6097 uint8_t value = n->smart_critical_warning;
6099 visit_type_uint8(v, name, &value, errp);
6102 static void nvme_set_smart_warning(Object *obj, Visitor *v, const char *name,
6103 void *opaque, Error **errp)
6105 NvmeCtrl *n = NVME(obj);
6106 uint8_t value, old_value, cap = 0, index, event;
6108 if (!visit_type_uint8(v, name, &value, errp)) {
6109 return;
6112 cap = NVME_SMART_SPARE | NVME_SMART_TEMPERATURE | NVME_SMART_RELIABILITY
6113 | NVME_SMART_MEDIA_READ_ONLY | NVME_SMART_FAILED_VOLATILE_MEDIA;
6114 if (NVME_CAP_PMRS(n->bar.cap)) {
6115 cap |= NVME_SMART_PMR_UNRELIABLE;
6118 if ((value & cap) != value) {
6119 error_setg(errp, "unsupported smart critical warning bits: 0x%x",
6120 value & ~cap);
6121 return;
6124 old_value = n->smart_critical_warning;
6125 n->smart_critical_warning = value;
6127 /* only inject new bits of smart critical warning */
6128 for (index = 0; index < NVME_SMART_WARN_MAX; index++) {
6129 event = 1 << index;
6130 if (value & ~old_value & event)
6131 nvme_smart_event(n, event);
6135 static const VMStateDescription nvme_vmstate = {
6136 .name = "nvme",
6137 .unmigratable = 1,
6140 static void nvme_class_init(ObjectClass *oc, void *data)
6142 DeviceClass *dc = DEVICE_CLASS(oc);
6143 PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
6145 pc->realize = nvme_realize;
6146 pc->exit = nvme_exit;
6147 pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
6148 pc->revision = 2;
6150 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
6151 dc->desc = "Non-Volatile Memory Express";
6152 device_class_set_props(dc, nvme_props);
6153 dc->vmsd = &nvme_vmstate;
6156 static void nvme_instance_init(Object *obj)
6158 NvmeCtrl *n = NVME(obj);
6160 if (n->namespace.blkconf.blk) {
6161 device_add_bootindex_property(obj, &n->namespace.blkconf.bootindex,
6162 "bootindex", "/namespace@1,0",
6163 DEVICE(obj));
6166 object_property_add(obj, "smart_critical_warning", "uint8",
6167 nvme_get_smart_warning,
6168 nvme_set_smart_warning, NULL, NULL);
6171 static const TypeInfo nvme_info = {
6172 .name = TYPE_NVME,
6173 .parent = TYPE_PCI_DEVICE,
6174 .instance_size = sizeof(NvmeCtrl),
6175 .instance_init = nvme_instance_init,
6176 .class_init = nvme_class_init,
6177 .interfaces = (InterfaceInfo[]) {
6178 { INTERFACE_PCIE_DEVICE },
6183 static const TypeInfo nvme_bus_info = {
6184 .name = TYPE_NVME_BUS,
6185 .parent = TYPE_BUS,
6186 .instance_size = sizeof(NvmeBus),
6189 static void nvme_register_types(void)
6191 type_register_static(&nvme_info);
6192 type_register_static(&nvme_bus_info);
6195 type_init(nvme_register_types)