/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;

    BlockCompletionFunc *cb;

    uint64_t prp_list_iova;

    CoQueue   free_req_queue;

    /* Fields protected by BQL */
    uint8_t   *prp_list_pages;

    /* Fields protected by @lock */
    NVMeRequest reqs[NVME_QUEUE_SIZE];
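
/*
 * Each queue pair pre-allocates NVME_QUEUE_SIZE request slots in reqs[]; each
 * slot is backed by one page of prp_list_pages for building its PRP list.
 * Completion entries carry a 1-based command id (cid), which
 * nvme_process_completion() maps back to reqs[cid - 1].
 */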
/* Memory mapped registers */
typedef volatile struct {
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specific[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
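
/*
 * BAR 0 layout: the controller registers occupy the first 0x1000 bytes and
 * the doorbell array starts at offset 0x1000 (hence the build-time check
 * above).  NVME_BAR_SIZE (8 KiB) therefore maps the register page plus one
 * page of doorbells.
 */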
    AioContext *aio_context;

    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;

    /* How many uint32_t elements each doorbell entry takes. */
    size_t doorbell_scale;
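    /*
     * CAP.DSTRD gives the doorbell stride as (4 << DSTRD) bytes, so
     * doorbell_scale = (4 << DSTRD) / sizeof(uint32_t).  Queue pair idx then
     * uses doorbells[idx * 2 * doorbell_scale] for its SQ tail doorbell and
     * doorbells[(idx * 2 + 1) * doorbell_scale] for its CQ head doorbell
     * (see nvme_create_queue_pair() and nvme_init()).
     */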
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */

    uint64_t max_transfer;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */

    /* PCI address (required for nvme_refresh_filename()) */
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
static QemuOptsList runtime_opts = {
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
    BDRVNVMeState *s = bs->opaque;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);
        error_setg(errp, "Cannot allocate queue");
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
        error_setg(errp, "Cannot map queue");
static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
static void nvme_free_req_queue_cb(void *opaque)
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    qemu_mutex_unlock(&q->lock);
static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
        error_propagate(errp, local_err);
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
        error_propagate(errp, local_err);
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    nvme_free_queue_pair(bs, q);
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
    if (s->plugged || !q->need_kick) {
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
/* Find a free request element if any, otherwise:
 *   a) if in coroutine context, try to wait for one to become available;
 *   b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case (head
         * == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
            qemu_mutex_unlock(&q->lock);
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    qemu_mutex_unlock(&q->lock);
static inline int nvme_translate_error(const NvmeCqe *c)
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(status));
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
    bool progress = false;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
    assert(q->inflight >= 0);
    while (q->inflight) {
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
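        /*
         * The low bit of the status field is the phase tag.  The controller
         * inverts it on every pass through the completion ring and q->cq_phase
         * tracks the "stale" value, so an entry whose phase bit still equals
         * q->cq_phase has not been posted yet and the scan stops here.
         */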
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
            q->cq_phase = !q->cq_phase;
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        assert(req.cid == cid);
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
    /* Notify the device so it can post more completions. */
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(s->aio_context,
                                         nvme_free_req_queue_cb, q);
static void nvme_trace_command(const NvmeCmd *cmd)
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid); /* cid is a 16-bit field */

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
static void nvme_cmd_sync_cb(void *opaque, int ret)

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req(q);
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
    BDRVNVMeState *s = bs->opaque;
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
        error_setg(errp, "Cannot allocate buffer for identify response");
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
        error_setg(errp, "Cannot map buffer for DMA");
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);
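    /*
     * With the minimum 4 KiB page size this caps transfers at
     * 4096 / 8 * 4096 bytes = 512 PRP entries * 4 KiB = 2 MiB per command,
     * unless MDTS is even smaller.
     */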
    memset(resp, 0, 4096);

    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");

    s->nsze = le64_to_cpu(idns->nsze);
    lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];
        error_setg(errp, "Namespaces with metadata are not yet supported");

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
        error_setg(errp, "Namespace has unsupported block size (2^%d)",

    s->blkshift = lbaf->ds;
    qemu_vfio_dma_unmap(s->vfio, resp);
static bool nvme_poll_queues(BDRVNVMeState *s)
    bool progress = false;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
        qemu_mutex_unlock(&q->lock);
static void nvme_handle_event(EventNotifier *n)
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
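    /*
     * Create I/O Completion Queue: CDW10 holds the 0-based queue size in the
     * high half and the queue id in the low half; CDW11 = 0x3 marks the queue
     * physically contiguous with interrupts enabled.
     */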
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
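    /*
     * Create I/O Submission Queue: CDW10 is encoded as for the CQ above;
     * CDW11 bit 0 marks it physically contiguous and bits 31:16 give the id
     * of the completion queue to pair with (the CQ just created, also id n).
     */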
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
static bool nvme_poll_cb(void *opaque)
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
    BDRVNVMeState *s = bs->opaque;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
        error_setg(errp, "Failed to init event notifier");

    s->vfio = qemu_vfio_open_pci(device, errp);
    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    /* Perform the initialization sequence as described in NVMe spec "7.6.1
     * Initialization". */
    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
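    /*
     * CAP fields used above: bit 37 = NVM command set supported (CSS),
     * bits 51:48 = MPSMIN (minimum page size is 2^(12 + MPSMIN)),
     * bits 35:32 = DSTRD (doorbell stride), bits 31:24 = TO (worst-case
     * enable/disable timeout in 500 ms units, capped here at 30 s).
     */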
    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable the device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
        error_propagate(errp, local_err);

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
    /* Cleaning up is done in nvme_file_open() upon error. */
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
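/* For example, a guest disk backed by that namespace can be configured with
 * something along the lines of (illustrative only; assumes the controller has
 * been unbound from the kernel nvme driver and bound to vfio-pci):
 *
 *     -drive file=nvme://0000:44:00.0/1,format=raw,if=none,id=nvdisk \
 *     -device virtio-blk-pci,drive=nvdisk
 *
 * which is equivalent to spelling out the runtime options as
 * file.driver=nvme,file.device=0000:44:00.0,file.namespace=1. */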
static void nvme_parse_filename(const char *filename, QDict *options,
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        const char *namespace;
        const char *slash = strchr(tmp, '/');
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
    BDRVNVMeState *s = bs->opaque;
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
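    /*
     * Set Features with FID 0x06 (Volatile Write Cache); CDW11 bit 0 turns
     * the cache on or off.
     */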
    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
        error_setg(errp, "Failed to configure NVMe write cache");
static void nvme_close(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
                "NVMe controller doesn't support write cache configuration");
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
    bs->supported_write_flags = BDRV_REQ_FUA;
static int64_t nvme_getlength(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
            qemu_co_queue_restart_all(&s->dma_flush_queue);
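    /*
     * Temporary (per-request) IOVA mappings can only be recycled in bulk:
     * once every in-flight mapping has been released (dma_map_count reaches
     * zero), qemu_vfio_dma_reset_temporary() drops them all and the requests
     * parked on dma_flush_queue in nvme_cmd_map_qiov() are restarted.
     */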
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;

    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
        if (r == -ENOMEM && retry) {
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
                r = qemu_vfio_dma_reset_temporary(s->vfio);
        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    s->dma_map_count += qiov->size;
    assert(entries <= s->page_size / sizeof(uint64_t));
        cmd->prp1 = pagelist[0];

        cmd->prp1 = pagelist[0];
        cmd->prp2 = pagelist[1];

        cmd->prp1 = pagelist[0];
        cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
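    /*
     * PRP rules (see the assignments above): a single page goes in PRP1
     * alone; exactly two pages use PRP1 and PRP2 directly; anything longer
     * keeps the first page in PRP1 and points PRP2 at the PRP list.
     * pagelist[] is the start of req->prp_list_page, so the list pointer
     * skips the first 8-byte entry, which is already carried in PRP1.
     */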
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);

    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
static void nvme_rw_cb_bh(void *opaque)
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);

static void nvme_rw_cb(void *opaque, int ret)
    NVMeCoData *data = opaque;
        /* The rw coroutine hasn't yielded, don't try to enter. */
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
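    /*
     * CDW12 for read/write: bits 15:0 are the number of logical blocks,
     * 0-based (hence the "- 1"), and bit 30 is the Force Unit Access flag
     * mapped from BDRV_REQ_FUA.
     */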
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
        .ctx = bdrv_get_aio_context(bs),

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
    BDRVNVMeState *s = bs->opaque;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    qemu_iovec_init(&local_qiov, 1);
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)

static void nvme_refresh_filename(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
static void nvme_detach_aio_context(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
static void nvme_aio_plug(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);

static void nvme_aio_unplug(BlockDriverState *bs)
    BDRVNVMeState *s = bs->opaque;

    for (i = 1; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_process_completion(s, q);
        qemu_mutex_unlock(&q->lock);
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
static void bdrv_nvme_init(void)
    bdrv_register(&bdrv_nvme);

block_init(bdrv_nvme_init);