[qemu/ar7.git] / hw/block/nvme.c
/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <keith.busch@intel.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 *  http://www.nvmexpress.org/resources/
 */

/**
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 */
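/*
 * Illustrative invocation (the image name, serial string and CMB size below
 * are arbitrary examples, not defaults of this device):
 *
 *      qemu-system-x86_64 ... \
 *          -drive file=nvme.img,if=none,id=nvme0 \
 *          -device nvme,drive=nvme0,serial=deadbeef,cmb_size_mb=64
 */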
#include "qemu/osdep.h"
#include "hw/block/block.h"
#include "hw/hw.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"

#include "nvme.h"

static void nvme_process_sq(void *opaque);
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
                addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            pci_irq_pulse(&n->parent_obj);
        }
    }
}
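/*
 * Map the PRP1/PRP2 pair of a command onto host-accessible memory: PRPs that
 * fall inside the Controller Memory Buffer are collected into a QEMUIOVector
 * (qsg->nsg stays 0 in that case), while PRPs in guest RAM are collected into
 * a QEMUSGList for DMA.  A PRP list referenced by PRP2 is walked page by
 * page, following chained list pages when needed.
 */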
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (!prp1) {
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr],
                       trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (!prp2) {
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (!prp_ent || prp_ent & (n->page_size - 1)) {
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list, prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (!prp_ent || prp_ent & (n->page_size - 1)) {
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov,
                        (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr],
                        trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (prp2 & (n->page_size - 1)) {
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov,
                    (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}
static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
    uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_read(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}
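/*
 * Flush completed requests from the CQ's req_list into the guest-visible
 * completion queue: each CQE is stamped with the current phase tag and
 * written to guest memory with pci_dma_write(), then the request is returned
 * to its submission queue's free list.  Runs from a timer so completions are
 * posted asynchronously.
 */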
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    nvme_isr_notify(n, cq);
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
    uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);

    if (slba + nlb > ns->id_ns.nsze) {
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, aio_slba, aio_nlb,
                                        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}
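/*
 * Read/Write command path: the transfer size and offset are derived from the
 * namespace's LBA format, the PRPs are mapped, and the request is submitted
 * to the block layer either as a DMA scatter-gather operation (guest RAM) or
 * as an iovec-based request (data resident in the CMB).
 */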
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    if ((slba + nlb) > ns->id_ns.nsze) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset,
                          BDRV_SECTOR_SIZE, nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset,
                         BDRV_SECTOR_SIZE, nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}
static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (nsid == 0 || nsid > n->num_namespaces) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
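/*
 * Queue management: the helpers below create, initialize and delete I/O
 * submission and completion queues on behalf of the corresponding admin
 * commands.  Deleting a submission queue cancels any asynchronous I/O it
 * still has outstanding before the queue is freed.
 */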
static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (!qid || nvme_check_sqid(n, qid)) {
        return NVME_INVALID_QID | NVME_DNR;
    }

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}
static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    if (!cqid || nvme_check_cqid(n, cqid)) {
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (!sqid || !nvme_check_sqid(n, sqid)) {
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (!prp1 || prp1 & (n->page_size - 1)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (!(NVME_SQ_FLAGS_PC(qflags))) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}
static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (!qid || nvme_check_cqid(n, qid)) {
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (!QTAILQ_EMPTY(&cq->sq_list)) {
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    if (!cqid || !nvme_check_cqid(n, cqid)) {
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (!prp1) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (vector > n->num_queues) {
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (!(NVME_CQ_FLAGS_PC(qflags))) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}
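/*
 * Identify command: CNS 0x00 returns the Identify Namespace structure, CNS
 * 0x01 the Identify Controller structure, and CNS 0x02 the active namespace
 * ID list.  All three transfer their payload to the guest via
 * nvme_dma_read_prp().
 */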
static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    if (nsid == 0 || nsid > n->num_namespaces) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4096;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00:
        return nvme_identify_ns(n, c);
    case 0x01:
        return nvme_identify_ctrl(n, c);
    case 0x02:
        return nvme_identify_nslist(n, c);
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        req->cqe.result =
            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}
static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}
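/*
 * Submission-queue processing: fetch submission queue entries from guest
 * memory (or the CMB), bind each to a free NvmeRequest, and dispatch to the
 * admin or I/O handler depending on the queue ID (SQ 0 is the admin queue).
 * Commands that complete synchronously are queued for completion right away;
 * asynchronous ones return NVME_NO_COMPLETE and finish from their
 * block-layer callback.
 */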
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}
static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    for (i = 0; i < n->num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}
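/*
 * Controller enable (CC.EN 0 -> 1): validate the admin queue addresses,
 * memory page size and queue entry sizes against the advertised CAP, CQES
 * and SQES limits before initializing the admin queues and reporting ready.
 */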
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
            n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
            NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
            NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
            NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
            NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
            NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
            NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
            !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) {
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
        NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
        NVME_AQA_ASQS(n->bar.aqa) + 1);

    return 0;
}
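/*
 * BAR0 register writes.  Offsets follow the NVMe register map:
 * 0x0c INTMS, 0x10 INTMC, 0x14 CC, 0x24 AQA, 0x28/0x2c ASQ (low/high),
 * 0x30/0x34 ACQ (low/high).
 */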
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    switch (offset) {
    case 0xc:
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        break;
    case 0x10:
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        break;
    case 0x14:
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (nvme_start_ctrl(n)) {
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x24:
        n->bar.aqa = data & 0xffffffff;
        break;
    case 0x28:
        n->bar.asq = data;
        break;
    case 0x2c:
        n->bar.asq |= data << 32;
        break;
    case 0x30:
        n->bar.acq = data;
        break;
    case 0x34:
        n->bar.acq |= data << 32;
        break;
    default:
        break;
    }
}
static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (addr < sizeof(n->bar)) {
        memcpy(&val, ptr + addr, size);
    }
    return val;
}
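/*
 * Doorbell writes.  Doorbell registers start at offset 0x1000 with a stride
 * of 4 bytes: even doorbell indices are submission queue tails, odd indices
 * are completion queue heads.  Ringing a doorbell kicks the corresponding
 * queue's timer so new entries are processed or freed CQ space is refilled.
 */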
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (addr & ((1 << 2) - 1)) {
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (nvme_check_cqid(n, qid)) {
            return;
        }

        cq = n->cq[qid];
        if (new_head >= cq->size) {
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail != cq->head) {
            nvme_isr_notify(n, cq);
        }
    } else {
        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (nvme_check_sqid(n, qid)) {
            return;
        }

        sq = n->sq[qid];
        if (new_tail >= sq->size) {
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
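/*
 * Controller Memory Buffer accessors: reads and writes to the CMB BAR are
 * backed by the n->cmbuf allocation and simply copy data in and out.
 */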
static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    memcpy(&n->cmbuf[addr], &data, size);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    uint64_t val;
    NvmeCtrl *n = (NvmeCtrl *)opaque;

    memcpy(&val, &n->cmbuf[addr], size);
    return val;
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
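/*
 * PCI realize: validate the backing drive and serial, set up BAR0 (MMIO
 * registers) and the MSI-X exclusive BAR, fill in the Identify Controller
 * structure and CAP register, optionally expose a CMB, and carve the backing
 * image into a single namespace.
 */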
static int nvme_init(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;
    Error *local_err = NULL;

    if (!n->conf.blk) {
        return -1;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        return -1;
    }

    blkconf_serial(&n->conf, &n->serial);
    if (!n->serial) {
        return -1;
    }
    blkconf_blocksizes(&n->conf);
    blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                  false, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(&n->parent_obj, 0x80);

    n->num_namespaces = 1;
    n->num_queues = 64;
    n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->num_queues);
    n->cq = g_new0(NvmeCQueue *, n->num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(&n->parent_obj, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_AMS(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;
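    /*
     * Optional Controller Memory Buffer: advertised in BAR2 (CMBLOC.BIR = 2)
     * with SQS, RDS and WDS support and a size unit of MB, backed by a host
     * allocation exposed through the nvme_cmb_ops MMIO region.
     */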
    if (n->cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(&n->parent_obj, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap  = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
    return 0;
}
static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);
    if (n->cmbsz) {
        memory_region_unref(&n->ctrl_mem);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->init = nvme_init;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;
    pc->is_express = 1;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    dc->props = nvme_props;
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj), &error_abort);
}

static const TypeInfo nvme_info = {
    .name          = "nvme",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)