hw/block/nvme.c
/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *   https://nvmexpress.org/developers/nvme-specification/
 */

/**
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,serial=<serial>,id=<bus_name>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              [pmrdev=<mem_backend_file_id>,] \
 *              max_ioqpairs=<N[optional]>, \
 *              aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
 *              mdts=<N[optional]>
 *      -device nvme-ns,drive=<drive_id>,bus=bus_name,nsid=<nsid>
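 *
 * A minimal invocation that wires one backing image to one namespace might
 * look like this (the image path and IDs below are only examples):
 *
 *      -drive file=nvm.img,if=none,id=nvm
 *      -device nvme,serial=deadbeef,id=nvme0
 *      -device nvme-ns,drive=nvm,bus=nvme0,nsid=1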
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * The cmb_size_mb= and pmrdev= options are mutually exclusive due to
 * limitations in available BARs. cmb_size_mb= takes precedence over
 * pmrdev= when both are provided.
 *
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 *
 *      -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *              size=<size> .... -device nvme,...,pmrdev=<mem_id>
 *
 * nvme device parameters
 * ~~~~~~~~~~~~~~~~~~~~~~
 * - `aerl`
 *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
 *   of concurrently outstanding Asynchronous Event Request commands supported
 *   by the controller. This is a 0's based value, so e.g. aerl=3 allows four
 *   outstanding AERs.
 *
 * - `aer_max_queued`
 *   This is the maximum number of events that the device will enqueue for
 *   completion when there are no outstanding AERs. When the maximum number of
 *   enqueued events is reached, subsequent events will be dropped.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"
#include "nvme-ns.h"

#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_SPEC_VER 0x00010300
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2
#define NVME_TEMPERATURE 0x143
#define NVME_TEMPERATURE_WARNING 0x157
#define NVME_TEMPERATURE_CRITICAL 0x175
#define NVME_NUM_FW_SLOTS 1

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
                      " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static const bool nvme_feature_support[NVME_FID_MAX] = {
    [NVME_ARBITRATION]              = true,
    [NVME_POWER_MANAGEMENT]         = true,
    [NVME_TEMPERATURE_THRESHOLD]    = true,
    [NVME_ERROR_RECOVERY]           = true,
    [NVME_VOLATILE_WRITE_CACHE]     = true,
    [NVME_NUMBER_OF_QUEUES]         = true,
    [NVME_INTERRUPT_COALESCING]     = true,
    [NVME_INTERRUPT_VECTOR_CONF]    = true,
    [NVME_WRITE_ATOMICITY]          = true,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
    [NVME_TIMESTAMP]                = true,
};

static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
    [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
    [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
    [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
    [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
    [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
};

static void nvme_process_sq(void *opaque);

static uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static uint16_t nvme_sqid(NvmeRequest *req)
{
    return le16_to_cpu(req->sq->sqid);
}

static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
{
    assert(nvme_addr_is_cmb(n, addr));

    return &n->cmbuf[addr - n->ctrl_mem.addr];
}

static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    hwaddr hi = addr + size - 1;
    if (hi < addr) {
        return 1;
    }

    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr) && nvme_addr_is_cmb(n, hi)) {
        memcpy(buf, nvme_addr_to_cmb(n, addr), size);
        return 0;
    }

    return pci_dma_read(&n->parent_obj, addr, buf, size);
}

static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
{
    return nsid && (nsid == NVME_NSID_BROADCAST || nsid <= n->num_namespaces);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->vector < 32);
            n->irq_status &= ~(1 << cq->vector);
            nvme_irq_check(n);
        }
    }
}

static void nvme_req_clear(NvmeRequest *req)
{
    req->ns = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
    req->status = NVME_SUCCESS;
}

static void nvme_req_exit(NvmeRequest *req)
{
    if (req->qsg.sg) {
        qemu_sglist_destroy(&req->qsg);
    }

    if (req->iov.iov) {
        qemu_iovec_destroy(&req->iov);
    }
}

static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                  size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr_cmb(addr, len);

    if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
        return NVME_DATA_TRAS_ERROR;
    }

    qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);

    return NVME_SUCCESS;
}
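
/*
 * Map a guest address range into either the iovec (CMB-backed ranges) or
 * the scatter/gather list (regular DMA), refusing mixed use of the two
 * within a single request.
 */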
static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                              hwaddr addr, size_t len)
{
    if (!len) {
        return NVME_SUCCESS;
    }

    trace_pci_nvme_map_addr(addr, len);

    if (nvme_addr_is_cmb(n, addr)) {
        if (qsg && qsg->sg) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        assert(iov);

        if (!iov->iov) {
            qemu_iovec_init(iov, 1);
        }

        return nvme_map_addr_cmb(n, iov, addr, len);
    }

    if (iov && iov->iov) {
        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
    }

    assert(qsg);

    if (!qsg->sg) {
        pci_dma_sglist_init(qsg, &n->parent_obj, 1);
    }

    qemu_sglist_add(qsg, addr, len);

    return NVME_SUCCESS;
}
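
/*
 * Map a PRP1/PRP2 pair. PRP1 covers the first (possibly unaligned) page;
 * PRP2 is either a second data page or, for larger transfers, points to a
 * chained list of page-aligned PRP entries.
 */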
static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                             uint32_t len, NvmeRequest *req)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;
    uint16_t status;
    bool prp_list_in_cmb = false;
    int ret;

    QEMUSGList *qsg = &req->qsg;
    QEMUIOVector *iov = &req->iov;

    trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);

    if (nvme_addr_is_cmb(n, prp1)) {
        qemu_iovec_init(iov, num_prps);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
    }

    status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
    if (status) {
        return status;
    }

    len -= trans_len;
    if (len) {
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            if (nvme_addr_is_cmb(n, prp2)) {
                prp_list_in_cmb = true;
            }

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            if (ret) {
                trace_pci_nvme_err_addr_read(prp2);
                return NVME_DATA_TRAS_ERROR;
            }
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                    }

                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                                         prp_trans);
                    if (ret) {
                        trace_pci_nvme_err_addr_read(prp_ent);
                        return NVME_DATA_TRAS_ERROR;
                    }

                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                }

                trans_len = MIN(len, n->page_size);
                status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
                if (status) {
                    return status;
                }

                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                return NVME_INVALID_PRP_OFFSET | NVME_DNR;
            }
            status = nvme_map_addr(n, qsg, iov, prp2, len);
            if (status) {
                return status;
            }
        }
    }

    return NVME_SUCCESS;
}

/*
 * Map 'nsgld' data descriptors from 'segment'. The function subtracts the
 * number of bytes mapped from *len.
 */
static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
                                  QEMUIOVector *iov,
                                  NvmeSglDescriptor *segment, uint64_t nsgld,
                                  size_t *len, NvmeRequest *req)
{
    dma_addr_t addr, trans_len;
    uint32_t dlen;
    uint16_t status;

    for (int i = 0; i < nsgld; i++) {
        uint8_t type = NVME_SGL_TYPE(segment[i].type);

        switch (type) {
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            if (req->cmd.opcode == NVME_CMD_WRITE) {
                continue;
            }
            /* fall through */
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
            break;
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            return NVME_INVALID_NUM_SGL_DESCRS | NVME_DNR;
        default:
            return NVME_SGL_DESCR_TYPE_INVALID | NVME_DNR;
        }

        dlen = le32_to_cpu(segment[i].len);

        if (!dlen) {
            continue;
        }

        if (*len == 0) {
            /*
             * All data has been mapped, but the SGL contains additional
             * segments and/or descriptors. The controller might accept
             * ignoring the rest of the SGL.
             */
            uint32_t sgls = le32_to_cpu(n->id_ctrl.sgls);
            if (sgls & NVME_CTRL_SGLS_EXCESS_LENGTH) {
                break;
            }

            trace_pci_nvme_err_invalid_sgl_excess_length(nvme_cid(req));
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        trans_len = MIN(*len, dlen);

        if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
            goto next;
        }

        addr = le64_to_cpu(segment[i].addr);

        if (UINT64_MAX - addr < dlen) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        status = nvme_map_addr(n, qsg, iov, addr, trans_len);
        if (status) {
            return status;
        }

next:
        *len -= trans_len;
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
                             NvmeSglDescriptor sgl, size_t len,
                             NvmeRequest *req)
{
    /*
     * Read the segment in chunks of 256 descriptors (one 4k page) to avoid
     * dynamically allocating a potentially huge SGL. The spec allows the SGL
     * to be larger (as in number of bytes required to describe the SGL
     * descriptors and segment chain) than the command transfer size, so it is
     * not bounded by MDTS.
     */
    const int SEG_CHUNK_SIZE = 256;

    NvmeSglDescriptor segment[SEG_CHUNK_SIZE], *sgld, *last_sgld;
    uint64_t nsgld;
    uint32_t seg_len;
    uint16_t status;
    bool sgl_in_cmb = false;
    hwaddr addr;
    int ret;

    sgld = &sgl;
    addr = le64_to_cpu(sgl.addr);

    trace_pci_nvme_map_sgl(nvme_cid(req), NVME_SGL_TYPE(sgl.type), len);

    /*
     * If the entire transfer can be described with a single data block it can
     * be mapped directly.
     */
    if (NVME_SGL_TYPE(sgl.type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
        status = nvme_map_sgl_data(n, qsg, iov, sgld, 1, &len, req);
        if (status) {
            goto unmap;
        }

        goto out;
    }

    /*
     * If the segment is located in the CMB, the submission queue of the
     * request must also reside there.
     */
    if (nvme_addr_is_cmb(n, addr)) {
        if (!nvme_addr_is_cmb(n, req->sq->dma_addr)) {
            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
        }

        sgl_in_cmb = true;
    }

    for (;;) {
        switch (NVME_SGL_TYPE(sgld->type)) {
        case NVME_SGL_DESCR_TYPE_SEGMENT:
        case NVME_SGL_DESCR_TYPE_LAST_SEGMENT:
            break;
        default:
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        seg_len = le32_to_cpu(sgld->len);

        /* check the length of the (Last) Segment descriptor */
        if ((!seg_len || seg_len & 0xf) &&
            (NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
            return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
        }

        if (UINT64_MAX - addr < seg_len) {
            return NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        }

        nsgld = seg_len / sizeof(NvmeSglDescriptor);

        while (nsgld > SEG_CHUNK_SIZE) {
            if (nvme_addr_read(n, addr, segment, sizeof(segment))) {
                trace_pci_nvme_err_addr_read(addr);
                status = NVME_DATA_TRAS_ERROR;
                goto unmap;
            }

            status = nvme_map_sgl_data(n, qsg, iov, segment, SEG_CHUNK_SIZE,
                                       &len, req);
            if (status) {
                goto unmap;
            }

            nsgld -= SEG_CHUNK_SIZE;
            addr += SEG_CHUNK_SIZE * sizeof(NvmeSglDescriptor);
        }

        ret = nvme_addr_read(n, addr, segment, nsgld *
                             sizeof(NvmeSglDescriptor));
        if (ret) {
            trace_pci_nvme_err_addr_read(addr);
            status = NVME_DATA_TRAS_ERROR;
            goto unmap;
        }

        last_sgld = &segment[nsgld - 1];

        /*
         * If the segment ends with a Data Block or Bit Bucket Descriptor Type,
         * then we are done.
         */
        switch (NVME_SGL_TYPE(last_sgld->type)) {
        case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
        case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
            status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
            if (status) {
                goto unmap;
            }

            goto out;

        default:
            break;
        }

        /*
         * If the last descriptor was not a Data Block or Bit Bucket, then the
         * current segment must not be a Last Segment.
         */
        if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
            status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
            goto unmap;
        }

        sgld = last_sgld;
        addr = le64_to_cpu(sgld->addr);

        /*
         * Do not map the last descriptor; it will be a Segment or Last Segment
         * descriptor and is handled by the next iteration.
         */
        status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld - 1, &len, req);
        if (status) {
            goto unmap;
        }

        /*
         * If the next segment is in the CMB, make sure that the sgl was
         * already located there.
         */
        if (sgl_in_cmb != nvme_addr_is_cmb(n, addr)) {
            status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
            goto unmap;
        }
    }

out:
    /* if there is any residual left in len, the SGL was too short */
    if (len) {
        status = NVME_DATA_SGL_LEN_INVALID | NVME_DNR;
        goto unmap;
    }

    return NVME_SUCCESS;

unmap:
    if (iov->iov) {
        qemu_iovec_destroy(iov);
    }

    if (qsg->sg) {
        qemu_sglist_destroy(qsg);
    }

    return status;
}

static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
{
    uint64_t prp1, prp2;

    switch (NVME_CMD_FLAGS_PSDT(req->cmd.flags)) {
    case NVME_PSDT_PRP:
        prp1 = le64_to_cpu(req->cmd.dptr.prp1);
        prp2 = le64_to_cpu(req->cmd.dptr.prp2);

        return nvme_map_prp(n, prp1, prp2, len, req);
    case NVME_PSDT_SGL_MPTR_CONTIGUOUS:
    case NVME_PSDT_SGL_MPTR_SGL:
        /* SGLs shall not be used for Admin commands in NVMe over PCIe */
        if (!req->sq->sqid) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        return nvme_map_sgl(n, &req->qsg, &req->iov, req->cmd.dptr.sgl, len,
                            req);
    default:
        return NVME_INVALID_FIELD;
    }
}
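
/*
 * Map the data pointer of the command and copy 'len' bytes to or from the
 * controller-local buffer 'ptr', depending on the DMA direction.
 */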
static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                         DMADirection dir, NvmeRequest *req)
{
    uint16_t status = NVME_SUCCESS;

    status = nvme_map_dptr(n, len, req);
    if (status) {
        return status;
    }

    /* assert that only one of qsg and iov carries data */
    assert((req->qsg.nsg > 0) != (req->iov.niov > 0));

    if (req->qsg.nsg > 0) {
        uint64_t residual;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            residual = dma_buf_write(ptr, len, &req->qsg);
        } else {
            residual = dma_buf_read(ptr, len, &req->qsg);
        }

        if (unlikely(residual)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    } else {
        size_t bytes;

        if (dir == DMA_DIRECTION_TO_DEVICE) {
            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
        } else {
            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
        }

        if (unlikely(bytes != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    return status;
}
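
/*
 * Timer callback: drain completed requests from the completion queue's
 * req_list into guest-visible CQ entries and raise the interrupt if the
 * queue is non-empty.
 */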
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;
    int ret;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
                            sizeof(req->cqe));
        if (ret) {
            trace_pci_nvme_err_addr_write(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        nvme_inc_cq_tail(cq);
        nvme_req_exit(req);
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }

    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
                                          req->status);

    if (req->status) {
        trace_pci_nvme_err_req_status(nvme_cid(req), nvme_nsid(req->ns),
                                      req->status, req->cmd.opcode);
    }

    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
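
/*
 * Pair queued asynchronous events with outstanding AER commands, masking
 * each event type until the host clears it via Get Log Page.
 */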
static void nvme_process_aers(void *opaque)
{
    NvmeCtrl *n = opaque;
    NvmeAsyncEvent *event, *next;

    trace_pci_nvme_process_aers(n->aer_queued);

    QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
        NvmeRequest *req;
        NvmeAerResult *result;

        /* can't post cqe if there is nothing to complete */
        if (!n->outstanding_aers) {
            trace_pci_nvme_no_outstanding_aers();
            break;
        }

        /* ignore if masked (cqe posted, but event not cleared) */
        if (n->aer_mask & (1 << event->result.event_type)) {
            trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
            continue;
        }

        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        n->aer_queued--;

        n->aer_mask |= 1 << event->result.event_type;
        n->outstanding_aers--;

        req = n->aer_reqs[n->outstanding_aers];

        result = (NvmeAerResult *) &req->cqe.result;
        result->event_type = event->result.event_type;
        result->event_info = event->result.event_info;
        result->log_page = event->result.log_page;
        g_free(event);

        trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
                                    result->log_page);

        nvme_enqueue_req_completion(&n->admin_cq, req);
    }
}

static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
                               uint8_t event_info, uint8_t log_page)
{
    NvmeAsyncEvent *event;

    trace_pci_nvme_enqueue_event(event_type, event_info, log_page);

    if (n->aer_queued == n->params.aer_max_queued) {
        trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
        return;
    }

    event = g_new(NvmeAsyncEvent, 1);
    event->result = (NvmeAerResult) {
        .event_type = event_type,
        .event_info = event_info,
        .log_page   = log_page,
    };

    QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
    n->aer_queued++;

    nvme_process_aers(n);
}

static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
{
    n->aer_mask &= ~(1 << event_type);
    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }
}

static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
{
    uint8_t mdts = n->params.mdts;

    if (mdts && len > n->page_size << mdts) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
                                         uint64_t slba, uint32_t nlb)
{
    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);

    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;

    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);

    Error *local_err = NULL;

    trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));

    if (!ret) {
        block_acct_done(stats, acct);
    } else {
        uint16_t status;

        block_acct_failed(stats, acct);

        switch (req->cmd.opcode) {
        case NVME_CMD_READ:
            status = NVME_UNRECOVERED_READ;
            break;
        case NVME_CMD_FLUSH:
        case NVME_CMD_WRITE:
        case NVME_CMD_WRITE_ZEROES:
            status = NVME_WRITE_FAULT;
            break;
        default:
            status = NVME_INTERNAL_DEV_ERROR;
            break;
        }

        trace_pci_nvme_err_aio(nvme_cid(req), strerror(ret), status);

        error_setg_errno(&local_err, -ret, "aio failed");
        error_report_err(local_err);

        req->status = status;
    }

    nvme_enqueue_req_completion(nvme_cq(req), req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
    block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = nvme_l2b(ns, slba);
    uint32_t count = nvme_l2b(ns, nlb);
    uint16_t status;

    trace_pci_nvme_write_zeroes(nvme_cid(req), nvme_nsid(ns), slba, nlb);

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return status;
    }

    block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(req->ns->blkconf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
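
/*
 * Common read/write path: validate MDTS and LBA bounds, map the data
 * pointer, then submit the I/O through either the sglist or iovec path.
 */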
static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);

    uint64_t data_size = nvme_l2b(ns, nlb);
    uint64_t data_offset = nvme_l2b(ns, slba);
    enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
        BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
    BlockBackend *blk = ns->blkconf.blk;
    uint16_t status;

    trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
                      nvme_nsid(ns), nlb, data_size, slba);

    status = nvme_check_mdts(n, data_size);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
        goto invalid;
    }

    status = nvme_check_bounds(n, ns, slba, nlb);
    if (status) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        goto invalid;
    }

    status = nvme_map_dptr(n, data_size, req);
    if (status) {
        goto invalid;
    }

    block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
    if (req->qsg.sg) {
        if (acct == BLOCK_ACCT_WRITE) {
            req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
                                       BDRV_SECTOR_SIZE, nvme_rw_cb, req);
        } else {
            req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
                                      BDRV_SECTOR_SIZE, nvme_rw_cb, req);
        }
    } else {
        if (acct == BLOCK_ACCT_WRITE) {
            req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
                                         nvme_rw_cb, req);
        } else {
            req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
                                        nvme_rw_cb, req);
        }
    }

    return NVME_NO_COMPLETE;

invalid:
    block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
    return status;
}

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);

    trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                          req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));

    if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_ADMIN_ONLY) {
        return NVME_INVALID_OPCODE | NVME_DNR;
    }

    if (!nvme_nsid_valid(n, nsid)) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    req->ns = nvme_ns(n, nsid);
    if (unlikely(!req->ns)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    switch (req->cmd.opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, req);
    case NVME_CMD_WRITE_ZEROES:
        return nvme_write_zeroes(n, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, req);
    default:
        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeRequest *r, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        r = QTAILQ_FIRST(&sq->out_req_list);
        assert(r->aiocb);
        blk_aio_cancel(r->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
            if (r->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, r, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new0(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
                 n->sq[sqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

struct nvme_stats {
    uint64_t units_read;
    uint64_t units_written;
    uint64_t read_commands;
    uint64_t write_commands;
};

static void nvme_set_blk_stats(NvmeNamespace *ns, struct nvme_stats *stats)
{
    BlockAcctStats *s = blk_get_stats(ns->blkconf.blk);

    stats->units_read += s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
    stats->units_written += s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
    stats->read_commands += s->nr_ops[BLOCK_ACCT_READ];
    stats->write_commands += s->nr_ops[BLOCK_ACCT_WRITE];
}

static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    struct nvme_stats stats = { 0 };
    NvmeSmartLog smart = { 0 };
    uint32_t trans_len;
    NvmeNamespace *ns;
    time_t current_ms;

    if (off >= sizeof(smart)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nsid != 0xffffffff) {
        ns = nvme_ns(n, nsid);
        if (!ns) {
            return NVME_INVALID_NSID | NVME_DNR;
        }
        nvme_set_blk_stats(ns, &stats);
    } else {
        int i;

        for (i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }
            nvme_set_blk_stats(ns, &stats);
        }
    }

    trans_len = MIN(sizeof(smart) - off, buf_len);

    smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_read,
                                                        1000));
    smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(stats.units_written,
                                                           1000));
    smart.host_read_commands[0] = cpu_to_le64(stats.read_commands);
    smart.host_write_commands[0] = cpu_to_le64(stats.write_commands);

    smart.temperature = cpu_to_le16(n->temperature);

    if ((n->temperature >= n->features.temp_thresh_hi) ||
        (n->temperature <= n->features.temp_thresh_low)) {
        smart.critical_warning |= NVME_SMART_TEMPERATURE;
    }

    current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    smart.power_on_hours[0] =
        cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_SMART);
    }

    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
                                 NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeFwSlotInfoLog fw_log = {
        .afi = 0x1,
    };

    if (off >= sizeof(fw_log)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
    trans_len = MIN(sizeof(fw_log) - off, buf_len);

    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                                uint64_t off, NvmeRequest *req)
{
    uint32_t trans_len;
    NvmeErrorLog errlog;

    if (off >= sizeof(errlog)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (!rae) {
        nvme_clear_events(n, NVME_AER_TYPE_ERROR);
    }

    memset(&errlog, 0x0, sizeof(errlog));
    trans_len = MIN(sizeof(errlog) - off, buf_len);

    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
                    DMA_DIRECTION_FROM_DEVICE, req);
}
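
/*
 * Get Log Page: decode the dword count and offset from CDW10..13 and
 * dispatch to the per-log-page handlers above.
 */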
static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;

    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t dw12 = le32_to_cpu(cmd->cdw12);
    uint32_t dw13 = le32_to_cpu(cmd->cdw13);
    uint8_t  lid = dw10 & 0xff;
    uint8_t  lsp = (dw10 >> 8) & 0xf;
    uint8_t  rae = (dw10 >> 15) & 0x1;
    uint32_t numdl, numdu;
    uint64_t off, lpol, lpou;
    size_t   len;
    uint16_t status;

    numdl = (dw10 >> 16);
    numdu = (dw11 & 0xffff);
    lpol = dw12;
    lpou = dw13;

    len = (((numdu << 16) | numdl) + 1) << 2;
    off = (lpou << 32ULL) | lpol;

    if (off & 0x3) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);

    status = nvme_check_mdts(n, len);
    if (status) {
        trace_pci_nvme_err_mdts(nvme_cid(req), len);
        return status;
    }

    switch (lid) {
    case NVME_LOG_ERROR_INFO:
        return nvme_error_info(n, rae, len, off, req);
    case NVME_LOG_SMART_INFO:
        return nvme_smart_info(n, rae, len, off, req);
    case NVME_LOG_FW_SLOT_INFO:
        return nvme_fw_log_info(n, len, off, req);
    default:
        trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
                         uint16_t cqid, uint16_t vector, uint16_t size,
                         uint16_t irq_enabled)
{
    int ret;

    ret = msix_vector_use(&n->parent_obj, vector);
    assert(ret == 0);
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
                 n->cq[cqid] != NULL)) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_PRP_OFFSET | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
                 NVME_CQ_FLAGS_IEN(qflags));

    /*
     * It is only required to set qs_created when creating a completion queue;
     * creating a submission queue without a matching completion queue will
     * fail.
     */
    n->qs_created = true;
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_identify_ctrl();

    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    NvmeIdNs *id_ns, inactive = { 0 };
    uint32_t nsid = le32_to_cpu(c->nsid);

    trace_pci_nvme_identify_ns(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = nvme_ns(n, nsid);
    if (unlikely(!ns)) {
        id_ns = &inactive;
    } else {
        id_ns = &ns->id_ns;
    }

    return nvme_dma(n, (uint8_t *)id_ns, sizeof(NvmeIdNs),
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint32_t *list;
    uint16_t ret;
    int j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
     * since the Active Namespace ID List should return namespaces with ids
     * *higher* than the NSID specified in the command. This is also specified
     * in the spec (NVM Express v1.3d, Section 5.15.4).
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    list = g_malloc0(data_len);
    for (int i = 1; i <= n->num_namespaces; i++) {
        if (i <= min_nsid || !nvme_ns(n, i)) {
            continue;
        }
        list[j++] = cpu_to_le32(i);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
                   req);
    g_free(list);
    return ret;
}

static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint8_t list[NVME_IDENTIFY_DATA_SIZE];

    struct data {
        struct {
            NvmeIdNsDescr hdr;
            uint8_t v[16];
        } uuid;
    };

    struct data *ns_descrs = (struct data *)list;

    trace_pci_nvme_identify_ns_descr_list(nsid);

    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (unlikely(!nvme_ns(n, nsid))) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    memset(list, 0x0, sizeof(list));

    /*
     * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
     * structure, a Namespace UUID (nidt = 0x3) must be reported in the
     * Namespace Identification Descriptor. Add a very basic Namespace UUID
     * here.
     */
    ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
    ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
    stl_be_p(&ns_descrs->uuid.v, nsid);

    return nvme_dma(n, list, NVME_IDENTIFY_DATA_SIZE,
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_NS_DESCR_LIST:
        return nvme_identify_ns_descr_list(n, req);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;

    req->cqe.result = 1;
    if (nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;
    ts.timestamp = n->host_timestamp + elapsed_time;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                    DMA_DIRECTION_FROM_DEVICE, req);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint32_t result;
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
    uint16_t iv;

    static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
        [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
    };

    trace_pci_nvme_getfeat(nvme_cid(req), nsid, fid, sel, dw11);

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
            /*
             * The Reservation Notification Mask and Reservation Persistence
             * features require a status code of Invalid Field in Command when
             * NSID is 0xFFFFFFFF. Since the device does not support those
             * features we can always return Invalid Namespace or Format as we
             * should do for all other features.
             */
            return NVME_INVALID_NSID | NVME_DNR;
        }

        if (!nvme_ns(n, nsid)) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }
    }

    switch (sel) {
    case NVME_GETFEAT_SELECT_CURRENT:
        break;
    case NVME_GETFEAT_SELECT_SAVED:
        /* no features are saveable by the controller; fallthrough */
    case NVME_GETFEAT_SELECT_DEFAULT:
        goto defaults;
    case NVME_GETFEAT_SELECT_CAP:
        result = nvme_feature_cap[fid];
        goto out;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        /*
         * The controller only implements the Composite Temperature sensor, so
         * return 0 for all other sensors.
         */
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            goto out;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            result = n->features.temp_thresh_hi;
            goto out;
        case NVME_TEMP_THSEL_UNDER:
            result = n->features.temp_thresh_low;
            goto out;
        }

        return NVME_INVALID_FIELD | NVME_DNR;
    case NVME_VOLATILE_WRITE_CACHE:
        result = n->features.vwc;
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        goto out;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        result = n->features.async_config;
        goto out;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, req);
    default:
        break;
    }

defaults:
    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        result = 0;

        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
            result = NVME_TEMPERATURE_WARNING;
        }

        break;
    case NVME_NUMBER_OF_QUEUES:
        result = (n->params.max_ioqpairs - 1) |
            ((n->params.max_ioqpairs - 1) << 16);
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_INTERRUPT_VECTOR_CONF:
        iv = dw11 & 0xffff;
        if (iv >= n->params.max_ioqpairs + 1) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        result = iv;
        if (iv == n->admin_cq.vector) {
            result |= NVME_INTVC_NOCOALESCING;
        }

        break;
    default:
        result = nvme_feature_default[fid];
        break;
    }

out:
    req->cqe.result = cpu_to_le32(result);
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
{
    uint16_t ret;
    uint64_t timestamp;

    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
                   DMA_DIRECTION_TO_DEVICE, req);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeNamespace *ns;

    NvmeCmd *cmd = &req->cmd;
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);
    uint32_t nsid = le32_to_cpu(cmd->nsid);
    uint8_t fid = NVME_GETSETFEAT_FID(dw10);
    uint8_t save = NVME_SETFEAT_SAVE(dw10);

    trace_pci_nvme_setfeat(nvme_cid(req), nsid, fid, save, dw11);

    if (save) {
        return NVME_FID_NOT_SAVEABLE | NVME_DNR;
    }

    if (!nvme_feature_support[fid]) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
        if (nsid != NVME_NSID_BROADCAST) {
            if (!nvme_nsid_valid(n, nsid)) {
                return NVME_INVALID_NSID | NVME_DNR;
            }

            ns = nvme_ns(n, nsid);
            if (unlikely(!ns)) {
                return NVME_INVALID_FIELD | NVME_DNR;
            }
        }
    } else if (nsid && nsid != NVME_NSID_BROADCAST) {
        if (!nvme_nsid_valid(n, nsid)) {
            return NVME_INVALID_NSID | NVME_DNR;
        }

        return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
    }

    if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    switch (fid) {
    case NVME_TEMPERATURE_THRESHOLD:
        if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
            break;
        }

        switch (NVME_TEMP_THSEL(dw11)) {
        case NVME_TEMP_THSEL_OVER:
            n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
            break;
        case NVME_TEMP_THSEL_UNDER:
            n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
            break;
        default:
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        if (((n->temperature >= n->features.temp_thresh_hi) ||
             (n->temperature <= n->features.temp_thresh_low)) &&
            NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
            nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
                               NVME_AER_INFO_SMART_TEMP_THRESH,
                               NVME_LOG_SMART_INFO);
        }

        break;
    case NVME_VOLATILE_WRITE_CACHE:
        n->features.vwc = dw11 & 0x1;

        for (int i = 1; i <= n->num_namespaces; i++) {
            ns = nvme_ns(n, i);
            if (!ns) {
                continue;
            }

            if (!(dw11 & 0x1) && blk_enable_write_cache(ns->blkconf.blk)) {
                blk_flush(ns->blkconf.blk);
            }

            blk_set_enable_write_cache(ns->blkconf.blk, dw11 & 1);
        }

        break;

    case NVME_NUMBER_OF_QUEUES:
        if (n->qs_created) {
            return NVME_CMD_SEQ_ERROR | NVME_DNR;
        }

        /*
         * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
         * and NSQR.
         */
        if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
            return NVME_INVALID_FIELD | NVME_DNR;
        }

        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_ASYNCHRONOUS_EVENT_CONF:
        n->features.async_config = dw11;
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, req);
    default:
        return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_aer(nvme_cid(req));

    if (n->outstanding_aers > n->params.aerl) {
        trace_pci_nvme_aer_aerl_exceeded();
        return NVME_AER_LIMIT_EXCEEDED;
    }

    n->aer_reqs[n->outstanding_aers] = req;
    n->outstanding_aers++;

    if (!QTAILQ_EMPTY(&n->aer_queue)) {
        nvme_process_aers(n);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
{
    trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                             nvme_adm_opc_str(req->cmd.opcode));

    switch (req->cmd.opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, req);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, req);
    case NVME_ADM_CMD_GET_LOG_PAGE:
        return nvme_get_log(n, req);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, req);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, req);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, req);
    case NVME_ADM_CMD_ABORT:
        return nvme_abort(n, req);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, req);
    case NVME_ADM_CMD_ASYNC_EV_REQ:
        return nvme_aer(n, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
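
/*
 * Timer callback: fetch submission queue entries at the current head, bind
 * them to free request slots and dispatch to the admin or I/O command
 * handlers.
 */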
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
            trace_pci_nvme_err_addr_read(addr);
            trace_pci_nvme_err_cfs();
            n->bar.csts = NVME_CSTS_FAILED;
            break;
        }
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        nvme_req_clear(req);
        req->cqe.cid = cmd.cid;
        memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));

        status = sq->sqid ? nvme_io_cmd(n, req) :
            nvme_admin_cmd(n, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    NvmeNamespace *ns;
    int i;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_drain(ns);
    }

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    while (!QTAILQ_EMPTY(&n->aer_queue)) {
        NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
        QTAILQ_REMOVE(&n->aer_queue, event, entry);
        g_free(event);
    }

    n->aer_queued = 0;
    n->outstanding_aers = 0;
    n->qs_created = false;

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }

        nvme_ns_flush(ns);
    }

    n->bar.cc = 0;
}
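
/*
 * Validate the configuration written to CC against CAP and the controller
 * identify limits before bringing up the admin queues.
 */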
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(!(NVME_CAP_CSS(n->bar.cap) & (1 << NVME_CC_CSS(n->bar.cc))))) {
        trace_pci_nvme_err_startfail_css(NVME_CC_CSS(n->bar.cc));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
                 NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
                 NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    QTAILQ_INIT(&n->aer_queue);

    return 0;
}
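
/*
 * Handle guest writes to the controller registers in BAR0. Writes to
 * read-only or reserved registers are logged and ignored.
 */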
2120 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
2121 unsigned size)
2123 if (unlikely(offset & (sizeof(uint32_t) - 1))) {
2124 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
2125 "MMIO write not 32-bit aligned,"
2126 " offset=0x%"PRIx64"", offset);
2127 /* should be ignored, fall through for now */
2130 if (unlikely(size < sizeof(uint32_t))) {
2131 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
2132 "MMIO write smaller than 32-bits,"
2133 " offset=0x%"PRIx64", size=%u",
2134 offset, size);
2135 /* should be ignored, fall through for now */
2138 switch (offset) {
2139 case 0xc: /* INTMS */
2140 if (unlikely(msix_enabled(&(n->parent_obj)))) {
2141 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
2142 "undefined access to interrupt mask set"
2143 " when MSI-X is enabled");
2144 /* should be ignored, fall through for now */
2146 n->bar.intms |= data & 0xffffffff;
2147 n->bar.intmc = n->bar.intms;
2148 trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
2149 nvme_irq_check(n);
2150 break;
2151 case 0x10: /* INTMC */
2152 if (unlikely(msix_enabled(&(n->parent_obj)))) {
2153 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
2154 "undefined access to interrupt mask clr"
2155 " when MSI-X is enabled");
2156 /* should be ignored, fall through for now */
2158 n->bar.intms &= ~(data & 0xffffffff);
2159 n->bar.intmc = n->bar.intms;
2160 trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
2161 nvme_irq_check(n);
2162 break;
2163 case 0x14: /* CC */
2164 trace_pci_nvme_mmio_cfg(data & 0xffffffff);
2165 /* Windows first sends data, then sends enable bit */
2166 if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
2167 !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
2169 n->bar.cc = data;
2172 if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
2173 n->bar.cc = data;
2174 if (unlikely(nvme_start_ctrl(n))) {
2175 trace_pci_nvme_err_startfail();
2176 n->bar.csts = NVME_CSTS_FAILED;
2177 } else {
2178 trace_pci_nvme_mmio_start_success();
2179 n->bar.csts = NVME_CSTS_READY;
2181 } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
2182 trace_pci_nvme_mmio_stopped();
2183 nvme_clear_ctrl(n);
2184 n->bar.csts &= ~NVME_CSTS_READY;
2186 if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
2187 trace_pci_nvme_mmio_shutdown_set();
2188 nvme_clear_ctrl(n);
2189 n->bar.cc = data;
2190 n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
2191 } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
2192 trace_pci_nvme_mmio_shutdown_cleared();
2193 n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
2194 n->bar.cc = data;
2196 break;
2197 case 0x1C: /* CSTS */
2198 if (data & (1 << 4)) {
2199 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
2200 "attempted to W1C CSTS.NSSRO"
2201 " but CAP.NSSRS is zero (not supported)");
2202 } else if (data != 0) {
2203 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
2204 "attempted to set a read only bit"
2205 " of controller status");
2207 break;
2208 case 0x20: /* NSSR */
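        /*
         * 0x4E564D65 is ASCII "NVMe"; the spec defines it as the only
         * value that triggers an NVM Subsystem Reset when written to
         * NSSR (unsupported here, since CAP.NSSRS is zero).
         */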
2209 if (data == 0x4E564D65) {
2210 trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
2211 } else {
2212 /* The spec says that writes of other values have no effect */
2213 return;
2215 break;
2216 case 0x24: /* AQA */
2217 n->bar.aqa = data & 0xffffffff;
2218 trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
2219 break;
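    /*
     * ASQ and ACQ are 64-bit registers written as two 32-bit dwords:
     * the low write replaces the register, the high write ORs in the
     * upper half. Worked example (illustrative): writing 0x00c00000 to
     * offset 0x28 and then 0x1 to offset 0x2c leaves
     * n->bar.asq == 0x100c00000.
     */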
2220 case 0x28: /* ASQ */
2221 n->bar.asq = data;
2222 trace_pci_nvme_mmio_asqaddr(data);
2223 break;
2224 case 0x2c: /* ASQ hi */
2225 n->bar.asq |= data << 32;
2226 trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
2227 break;
2228 case 0x30: /* ACQ */
2229 trace_pci_nvme_mmio_acqaddr(data);
2230 n->bar.acq = data;
2231 break;
2232 case 0x34: /* ACQ hi */
2233 n->bar.acq |= data << 32;
2234 trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
2235 break;
2236 case 0x38: /* CMBLOC */
2237 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
2238 "invalid write to reserved CMBLOC"
2239 " when CMBSZ is zero, ignored");
2240 return;
2241 case 0x3C: /* CMBSZ */
2242 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
2243 "invalid write to read only CMBSZ, ignored");
2244 return;
2245 case 0xE00: /* PMRCAP */
2246 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
2247 "invalid write to PMRCAP register, ignored");
2248 return;
2249 case 0xE04: /* TODO PMRCTL */
2250 break;
2251 case 0xE08: /* PMRSTS */
2252 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
2253 "invalid write to PMRSTS register, ignored");
2254 return;
2255 case 0xE0C: /* PMREBS */
2256 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
2257 "invalid write to PMREBS register, ignored");
2258 return;
2259 case 0xE10: /* PMRSWTP */
2260 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
2261 "invalid write to PMRSWTP register, ignored");
2262 return;
2263 case 0xE14: /* TODO PMRMSC */
2264 break;
2265 default:
2266 NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
2267 "invalid MMIO write,"
2268 " offset=0x%"PRIx64", data=%"PRIx64"",
2269 offset, data);
2270 break;
2274 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
2276 NvmeCtrl *n = (NvmeCtrl *)opaque;
2277 uint8_t *ptr = (uint8_t *)&n->bar;
2278 uint64_t val = 0;
2280 trace_pci_nvme_mmio_read(addr);
2282 if (unlikely(addr & (sizeof(uint32_t) - 1))) {
2283 NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
2284 "MMIO read not 32-bit aligned,"
2285 " offset=0x%"PRIx64"", addr);
2286 /* should RAZ, fall through for now */
2287 } else if (unlikely(size < sizeof(uint32_t))) {
2288 NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
2289 "MMIO read smaller than 32-bits,"
2290 " offset=0x%"PRIx64"", addr);
2291 /* should RAZ, fall through for now */
2294 if (addr < sizeof(n->bar)) {
2296 * When PMRWBM bit 1 is set then a read from
2297 * PMRSTS should ensure prior writes have
2298 * made it to persistent media
2300 if (addr == 0xE08 &&
2301 (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
2302 memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
2304 memcpy(&val, ptr + addr, size);
2305 } else {
2306 NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
2307 "MMIO read beyond last register,"
2308 " offset=0x%"PRIx64", returning 0", addr);
2311 return val;
2314 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
2316 uint32_t qid;
2318 if (unlikely(addr & ((1 << 2) - 1))) {
2319 NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
2320 "doorbell write not 32-bit aligned,"
2321 " offset=0x%"PRIx64", ignoring", addr);
2322 return;
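    /*
     * Doorbell layout with CAP.DSTRD == 0 (the only stride this device
     * reports): the tail doorbell of SQ y sits at 0x1000 + (2 * y) * 4
     * and the head doorbell of CQ y at 0x1000 + (2 * y + 1) * 4, so
     * bit 2 of (addr - 0x1000) selects CQ vs SQ. Illustrative decode
     * for addr == 0x1014:
     *
     *   ((0x1014 - 0x1000) >> 2) & 1 == 1      -> completion queue
     *   (0x1014 - (0x1000 + 4)) >> 3 == 2      -> cqid 2
     */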
2325 if (((addr - 0x1000) >> 2) & 1) {
2326 /* Completion queue doorbell write */
2328 uint16_t new_head = val & 0xffff;
2329 int start_sqs;
2330 NvmeCQueue *cq;
2332 qid = (addr - (0x1000 + (1 << 2))) >> 3;
2333 if (unlikely(nvme_check_cqid(n, qid))) {
2334 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
2335 "completion queue doorbell write"
2336 " for nonexistent queue,"
2337 " sqid=%"PRIu32", ignoring", qid);
2340 * NVM Express v1.3d, Section 4.1 states: "If host software writes
2341 * an invalid value to the Submission Queue Tail Doorbell or
2342 * Completion Queue Head Doorbell register and an Asynchronous Event
2343 * Request command is outstanding, then an asynchronous event is
2344 * posted to the Admin Completion Queue with a status code of
2345 * Invalid Doorbell Write Value."
2347 * Also note that the spec includes the "Invalid Doorbell Register"
2348 * status code, but nowhere does it specify when to use it.
2349 * However, it seems reasonable to use it here in a similar
2350 * fashion.
2352 if (n->outstanding_aers) {
2353 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2354 NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
2355 NVME_LOG_ERROR_INFO);
2358 return;
2361 cq = n->cq[qid];
2362 if (unlikely(new_head >= cq->size)) {
2363 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
2364 "completion queue doorbell write value"
2365 " beyond queue size, sqid=%"PRIu32","
2366 " new_head=%"PRIu16", ignoring",
2367 qid, new_head);
2369 if (n->outstanding_aers) {
2370 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2371 NVME_AER_INFO_ERR_INVALID_DB_VALUE,
2372 NVME_LOG_ERROR_INFO);
2375 return;
2378 trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
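        /*
         * Sampling nvme_cq_full() before moving the head matters: if
         * the CQ was full, its submission queues stalled for lack of
         * completion space, so once the host frees entries the SQ (and
         * CQ) timers below are re-armed to resume processing.
         */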
2380 start_sqs = nvme_cq_full(cq) ? 1 : 0;
2381 cq->head = new_head;
2382 if (start_sqs) {
2383 NvmeSQueue *sq;
2384 QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
2385 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2387 timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2390 if (cq->tail == cq->head) {
2391 nvme_irq_deassert(n, cq);
2393 } else {
2394 /* Submission queue doorbell write */
2396 uint16_t new_tail = val & 0xffff;
2397 NvmeSQueue *sq;
2399 qid = (addr - 0x1000) >> 3;
2400 if (unlikely(nvme_check_sqid(n, qid))) {
2401 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
2402 "submission queue doorbell write"
2403 " for nonexistent queue,"
2404 " sqid=%"PRIu32", ignoring", qid);
2406 if (n->outstanding_aers) {
2407 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2408 NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
2409 NVME_LOG_ERROR_INFO);
2412 return;
2415 sq = n->sq[qid];
2416 if (unlikely(new_tail >= sq->size)) {
2417 NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
2418 "submission queue doorbell write value"
2419 " beyond queue size, sqid=%"PRIu32","
2420 " new_tail=%"PRIu16", ignoring",
2421 qid, new_tail);
2423 if (n->outstanding_aers) {
2424 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2425 NVME_AER_INFO_ERR_INVALID_DB_VALUE,
2426 NVME_LOG_ERROR_INFO);
2429 return;
2432 trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
2434 sq->tail = new_tail;
2435 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2439 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
2440 unsigned size)
2442 NvmeCtrl *n = (NvmeCtrl *)opaque;
2444 trace_pci_nvme_mmio_write(addr, data);
2446 if (addr < sizeof(n->bar)) {
2447 nvme_write_bar(n, addr, data, size);
2448 } else {
2449 nvme_process_db(n, addr, data);
2453 static const MemoryRegionOps nvme_mmio_ops = {
2454 .read = nvme_mmio_read,
2455 .write = nvme_mmio_write,
2456 .endianness = DEVICE_LITTLE_ENDIAN,
2457 .impl = {
2458 .min_access_size = 2,
2459 .max_access_size = 8,
2463 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
2464 unsigned size)
2466 NvmeCtrl *n = (NvmeCtrl *)opaque;
2467 stn_le_p(&n->cmbuf[addr], size, data);
2470 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
2472 NvmeCtrl *n = (NvmeCtrl *)opaque;
2473 return ldn_le_p(&n->cmbuf[addr], size);
2476 static const MemoryRegionOps nvme_cmb_ops = {
2477 .read = nvme_cmb_read,
2478 .write = nvme_cmb_write,
2479 .endianness = DEVICE_LITTLE_ENDIAN,
2480 .impl = {
2481 .min_access_size = 1,
2482 .max_access_size = 8,
2486 static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
2488 NvmeParams *params = &n->params;
2490 if (params->num_queues) {
2491 warn_report("num_queues is deprecated; please use max_ioqpairs "
2492 "instead");
2494 params->max_ioqpairs = params->num_queues - 1;
2497 if (n->conf.blk) {
2498 warn_report("drive property is deprecated; "
2499 "please use an nvme-ns device instead");
2502 if (params->max_ioqpairs < 1 ||
2503 params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
2504 error_setg(errp, "max_ioqpairs must be between 1 and %d",
2505 NVME_MAX_IOQPAIRS);
2506 return;
2509 if (params->msix_qsize < 1 ||
2510 params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
2511 error_setg(errp, "msix_qsize must be between 1 and %d",
2512 PCI_MSIX_FLAGS_QSIZE + 1);
2513 return;
2516 if (!params->serial) {
2517 error_setg(errp, "serial property not set");
2518 return;
2521 if (!n->params.cmb_size_mb && n->pmrdev) {
2522 if (host_memory_backend_is_mapped(n->pmrdev)) {
2523 error_setg(errp, "can't use already busy memdev: %s",
2524 object_get_canonical_path_component(OBJECT(n->pmrdev)));
2525 return;
2528 if (!is_power_of_2(n->pmrdev->size)) {
2529 error_setg(errp, "pmr backend size needs to be a power of 2");
2530 return;
2533 host_memory_backend_set_mapped(n->pmrdev, true);
2537 static void nvme_init_state(NvmeCtrl *n)
2539 n->num_namespaces = NVME_MAX_NAMESPACES;
2540 /* add one to max_ioqpairs to account for the admin queue pair */
2541 n->reg_size = pow2ceil(sizeof(NvmeBar) +
2542 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
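    /*
     * Illustrative sizing: the default max_ioqpairs of 64 yields 65
     * queue pairs (admin included), i.e. 2 * 65 = 130 doorbells of
     * NVME_DB_SIZE (4) bytes = 520 bytes after the fixed register
     * file; pow2ceil() then rounds the BAR up to a power of two as
     * PCI requires.
     */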
2543 n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
2544 n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
2545 n->temperature = NVME_TEMPERATURE;
2546 n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
2547 n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
2548 n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
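    /*
     * params.aerl is a 0's based value, so aerl + 1 request slots are
     * needed; the default of 3 allows four concurrently outstanding
     * Asynchronous Event Requests.
     */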
2551 int nvme_register_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
2553 uint32_t nsid = nvme_nsid(ns);
2555 if (nsid > NVME_MAX_NAMESPACES) {
2556 error_setg(errp, "invalid namespace id (must be between 0 and %d)",
2557 NVME_MAX_NAMESPACES);
2558 return -1;
2561 if (!nsid) {
2562 for (int i = 1; i <= n->num_namespaces; i++) {
2563 if (!nvme_ns(n, i)) {
2564 nsid = ns->params.nsid = i;
2565 break;
2569 if (!nsid) {
2570 error_setg(errp, "no free namespace id");
2571 return -1;
2573 } else {
2574 if (n->namespaces[nsid - 1]) {
2575 error_setg(errp, "namespace id '%d' is already in use", nsid);
2576 return -1;
2580 trace_pci_nvme_register_namespace(nsid);
2582 n->namespaces[nsid - 1] = ns;
2584 return 0;
2587 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
2589 NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
2590 NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
2592 NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
2593 NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
2594 NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
2595 NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
2596 NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
2597 NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
2598 NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
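    /*
     * SZU selects the CMB size granularity as 4 KiB * 16^SZU, so
     * SZU == 2 means SZ is counted in 1 MiB units and the CMB is
     * exactly cmb_size_mb megabytes large (illustrative check:
     * cmb_size_mb = 64 gives a 64 MiB region).
     */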
2600 n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
2601 memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
2602 "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
2603 pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
2604 PCI_BASE_ADDRESS_SPACE_MEMORY |
2605 PCI_BASE_ADDRESS_MEM_TYPE_64 |
2606 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
2609 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
2611 /* Controller Capabilities register */
2612 NVME_CAP_SET_PMRS(n->bar.cap, 1);
2614 /* PMR Capabilities register */
2615 n->bar.pmrcap = 0;
2616 NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
2617 NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
2618 NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
2619 NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
2620 /* Turn on PMRWBM bit 1 support: a read of PMRSTS persists prior PMR writes */
2621 NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
2622 NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
2623 NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);
2625 /* PMR Control register */
2626 n->bar.pmrctl = 0;
2627 NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);
2629 /* PMR Status register */
2630 n->bar.pmrsts = 0;
2631 NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
2632 NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
2633 NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
2634 NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);
2636 /* PMR Elasticity Buffer Size register */
2637 n->bar.pmrebs = 0;
2638 NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
2639 NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
2640 NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);
2642 /* PMR Sustained Write Throughput register */
2643 n->bar.pmrswtp = 0;
2644 NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
2645 NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);
2647 /* PMR Memory Space Control register */
2648 n->bar.pmrmsc = 0;
2649 NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
2650 NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);
2652 pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
2653 PCI_BASE_ADDRESS_SPACE_MEMORY |
2654 PCI_BASE_ADDRESS_MEM_TYPE_64 |
2655 PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
2658 static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
2660 uint8_t *pci_conf = pci_dev->config;
2662 pci_conf[PCI_INTERRUPT_PIN] = 1;
2663 pci_config_set_prog_interface(pci_conf, 0x2);
2665 if (n->params.use_intel_id) {
2666 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
2667 pci_config_set_device_id(pci_conf, 0x5845);
2668 } else {
2669 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT);
2670 pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REDHAT_NVME);
2673 pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
2674 pcie_endpoint_cap_init(pci_dev, 0x80);
2676 memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
2677 n->reg_size);
2678 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
2679 PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
2680 if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
2681 return;
2684 if (n->params.cmb_size_mb) {
2685 nvme_init_cmb(n, pci_dev);
2686 } else if (n->pmrdev) {
2687 nvme_init_pmr(n, pci_dev);
2691 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
2693 NvmeIdCtrl *id = &n->id_ctrl;
2694 uint8_t *pci_conf = pci_dev->config;
2695 char *subnqn;
2697 id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
2698 id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
2699 strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
2700 strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
2701 strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
2702 id->rab = 6;
2703 id->ieee[0] = 0x00;
2704 id->ieee[1] = 0x02;
2705 id->ieee[2] = 0xb3;
2706 id->mdts = n->params.mdts;
2707 id->ver = cpu_to_le32(NVME_SPEC_VER);
2708 id->oacs = cpu_to_le16(0);
2711 * Because the controller always completes the Abort command immediately,
2712 * there can never be more than one concurrently executing Abort command,
2713 * so this value is never used for anything. Note that there can easily be
2714 * many Abort commands in the queues, but they are not considered
2715 * "executing" until processed by nvme_abort.
2717 * The specification recommends a value of 3 for Abort Command Limit (four
2718 * concurrently outstanding Abort commands), so let's use that, though it is
2719 * inconsequential.
2721 id->acl = 3;
2722 id->aerl = n->params.aerl;
2723 id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
2724 id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED;
2726 /* recommended default value (~70 C) */
2727 id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
2728 id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);
2730 id->sqes = (0x6 << 4) | 0x6;
2731 id->cqes = (0x4 << 4) | 0x4;
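    /*
     * SQES/CQES pack the maximum entry size in the upper nibble and
     * the minimum in the lower, both as powers of two:
     * (0x6 << 4) | 0x6 advertises 64-byte submission queue entries
     * only, and (0x4 << 4) | 0x4 16-byte completion queue entries
     * only, matching the CC.IOSQES/IOCQES bounds enforced in
     * nvme_start_ctrl().
     */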
2732 id->nn = cpu_to_le32(n->num_namespaces);
2733 id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
2734 NVME_ONCS_FEATURES);
2736 id->vwc = 0x1;
2737 id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
2738 NVME_CTRL_SGLS_BITBUCKET);
2740 subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
2741 strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
2742 g_free(subnqn);
2744 id->psd[0].mp = cpu_to_le16(0x9c4);
2745 id->psd[0].enlat = cpu_to_le32(0x10);
2746 id->psd[0].exlat = cpu_to_le32(0x4);
2748 n->bar.cap = 0;
2749 NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
2750 NVME_CAP_SET_CQR(n->bar.cap, 1);
2751 NVME_CAP_SET_TO(n->bar.cap, 0xf);
2752 NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_NVM);
2753 NVME_CAP_SET_CSS(n->bar.cap, NVME_CAP_CSS_ADMIN_ONLY);
2754 NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
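    /*
     * Decoded (illustrative): MQES 0x7ff is a 0's based value, so up
     * to 2048 entries per I/O queue; CQR 1 requires physically
     * contiguous queues; TO 0xf gives a worst-case ready timeout of
     * 15 * 500 ms = 7.5 s; MPSMAX 4 allows memory page sizes up to
     * 2^(12 + 4) = 64 KiB.
     */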
2756 n->bar.vs = NVME_SPEC_VER;
2757 n->bar.intmc = n->bar.intms = 0;
2760 static void nvme_realize(PCIDevice *pci_dev, Error **errp)
2762 NvmeCtrl *n = NVME(pci_dev);
2763 NvmeNamespace *ns;
2764 Error *local_err = NULL;
2766 nvme_check_constraints(n, &local_err);
2767 if (local_err) {
2768 error_propagate(errp, local_err);
2769 return;
2772 qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS,
2773 &pci_dev->qdev, n->parent_obj.qdev.id);
2775 nvme_init_state(n);
2776 nvme_init_pci(n, pci_dev, &local_err);
2777 if (local_err) {
2778 error_propagate(errp, local_err);
2779 return;
2782 nvme_init_ctrl(n, pci_dev);
2784 /* set up a namespace if the controller drive property was given */
2785 if (n->namespace.blkconf.blk) {
2786 ns = &n->namespace;
2787 ns->params.nsid = 1;
2789 if (nvme_ns_setup(n, ns, errp)) {
2790 return;
2795 static void nvme_exit(PCIDevice *pci_dev)
2797 NvmeCtrl *n = NVME(pci_dev);
2799 nvme_clear_ctrl(n);
2800 g_free(n->cq);
2801 g_free(n->sq);
2802 g_free(n->aer_reqs);
2804 if (n->params.cmb_size_mb) {
2805 g_free(n->cmbuf);
2808 if (n->pmrdev) {
2809 host_memory_backend_set_mapped(n->pmrdev, false);
2811 msix_uninit_exclusive_bar(pci_dev);
2814 static Property nvme_props[] = {
2815 DEFINE_BLOCK_PROPERTIES(NvmeCtrl, namespace.blkconf),
2816 DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
2817 HostMemoryBackend *),
2818 DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
2819 DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
2820 DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
2821 DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
2822 DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
2823 DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
2824 DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
2825 DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
2826 DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
2827 DEFINE_PROP_END_OF_LIST(),
2830 static const VMStateDescription nvme_vmstate = {
2831 .name = "nvme",
2832 .unmigratable = 1,
2835 static void nvme_class_init(ObjectClass *oc, void *data)
2837 DeviceClass *dc = DEVICE_CLASS(oc);
2838 PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
2840 pc->realize = nvme_realize;
2841 pc->exit = nvme_exit;
2842 pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
2843 pc->revision = 2;
2845 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2846 dc->desc = "Non-Volatile Memory Express";
2847 device_class_set_props(dc, nvme_props);
2848 dc->vmsd = &nvme_vmstate;
2851 static void nvme_instance_init(Object *obj)
2853 NvmeCtrl *s = NVME(obj);
2855 if (s->namespace.blkconf.blk) {
2856 device_add_bootindex_property(obj, &s->namespace.blkconf.bootindex,
2857 "bootindex", "/namespace@1,0",
2858 DEVICE(obj));
2862 static const TypeInfo nvme_info = {
2863 .name = TYPE_NVME,
2864 .parent = TYPE_PCI_DEVICE,
2865 .instance_size = sizeof(NvmeCtrl),
2866 .instance_init = nvme_instance_init,
2867 .class_init = nvme_class_init,
2868 .interfaces = (InterfaceInfo[]) {
2869 { INTERFACE_PCIE_DEVICE },
2874 static const TypeInfo nvme_bus_info = {
2875 .name = TYPE_NVME_BUS,
2876 .parent = TYPE_BUS,
2877 .instance_size = sizeof(NvmeBus),
2880 static void nvme_register_types(void)
2882 type_register_static(&nvme_info);
2883 type_register_static(&nvme_bus_info);
2886 type_init(nvme_register_types)