/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;

    BlockCompletionFunc *cb;

    uint64_t prp_list_iova;

    CoQueue     free_req_queue;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    NVMeRequest reqs[NVME_QUEUE_SIZE];
/* Memory mapped registers */
typedef volatile struct {
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specfic[0x100];
    uint32_t doorbells[];
} QEMU_PACKED NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
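/* Per the NVMe spec, the doorbell registers start at BAR0 offset 0x1000
 * (hence the build-time check above); the submission queue tail and
 * completion queue head doorbells of each queue pair are laid out there,
 * spaced by the stride advertised in CAP.DSTRD. */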
    AioContext *aio_context;

    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    /* How many uint32_t elements each doorbell entry takes. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;
    uint64_t nsze;      /* Namespace size reported by identify command */
    int nsid;           /* The namespace id to read/write data. */
    uint64_t max_transfer;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}
static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}
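/* Each queue pair carries NVME_QUEUE_SIZE request slots; every slot owns one
 * host page out of prp_list_pages, so a single request can describe up to
 * page_size / sizeof(uint64_t) data pages through its PRP list. */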
static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];

    return q;
fail:
    nvme_free_queue_pair(bs, q);
    return NULL;
}
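/* Ring the submission queue doorbell: writing the new tail index tells the
 * controller how many entries are ready to be fetched.  The write is skipped
 * while the BDS is plugged so that several submissions can be batched into a
 * single doorbell update (see nvme_aio_unplug()). */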
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
{
    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    int i;
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case (head
         * == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
            req = &q->reqs[i];
            break;
        }
    }
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    assert(req);
    qemu_mutex_unlock(&q->lock);
    return req;
}
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
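/* Completion queue entries are consumed in order.  Bit 0 of the CQE status
 * word is the Phase Tag, which the controller inverts on every pass through
 * the ring; an entry is new only while its Phase Tag differs from the phase
 * value last consumed by the host.  Bits 15:1 hold the status code (and
 * status code type); zero means success. */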
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
{
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
    while (q->inflight) {
        uint32_t cid;
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
                    cid);
            continue;
        }
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        preq->busy = false;
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
        c->cid = cpu_to_le16(0);
        q->inflight--;
        /* Flip Phase Tag bit. */
        c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        if (!qemu_co_queue_empty(&q->free_req_queue)) {
            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
        }
    }
    q->busy = false;
    return progress;
}
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}
static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(s, q);
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
}
static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
}
static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}
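/* Issue NVME_ADM_CMD_IDENTIFY twice: with CNS=1 (cdw10) to fetch the
 * controller data structure (VWC support, MDTS), then with CNS=0 and the
 * target NSID to fetch the namespace data structure (namespace size). */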
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    uint8_t *resp;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);

out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}
static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
            /* Keep polling until no progress */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    aio_context_acquire(s->aio_context);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
    aio_context_release(s->aio_context);
}
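/* I/O queues are created with admin commands on queue pair 0.  The completion
 * queue must exist before the submission queue that posts to it: cdw10 packs
 * the 0-based queue size (upper 16 bits) and the queue identifier, the CQ's
 * cdw11 sets the "physically contiguous" and "interrupts enabled" flags
 * (0x3), and the SQ's cdw11 sets "physically contiguous" plus the CQ id it
 * completes into. */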
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
    return progress;
}
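/* Controller bring-up below follows the CAP register: CAP.CSS bit 37 reports
 * NVM command set support, CAP.MPSMIN (bits 51:48) gives the minimum memory
 * page size as 2^(12+MPSMIN), CAP.DSTRD (bits 35:32) gives the doorbell
 * stride, and CAP.TO (bits 31:24) is the worst-case ready timeout in 500 ms
 * units. */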
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    int64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto fail;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto fail;
    }

    /* Perform initialization sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto fail;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->nr_queues = 1;
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto fail;
    }
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto fail;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto fail;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
        goto fail;
    }
    return 0;
fail:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
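/* Set Features with FID 0x06 (cdw10) selects the Volatile Write Cache
 * feature; bit 0 of cdw11 enables or disables it. */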
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}
static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
}
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    QemuOpts *opts;
    const char *device;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    return 0;
fail:
    nvme_close(bs);
    return ret;
}
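/* NSZE from Identify Namespace is a count of logical blocks; the driver
 * assumes a 512-byte LBA format here, matching BDRV_SECTOR_SIZE. */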
static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    return s->nsze << BDRV_SECTOR_BITS;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
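/* PRP mapping rules (NVMe spec): PRP1 always points at the first data page.
 * A second page can be passed directly in PRP2; anything larger needs a PRP
 * list, so the page addresses are written into the request's PRP list page
 * and PRP2 points at that list.  This is why max_transfer is capped at
 * page_size / sizeof(uint64_t) pages. */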
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = iova + j * s->page_size;
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(pagelist[1]);
        break;
    default:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(req->prp_list_iova);
        for (i = 0; i < entries - 1; ++i) {
            pagelist[i] = cpu_to_le64(pagelist[i + 1]);
        }
        pagelist[entries - 1] = 0;
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}
static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
}
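/* NVMe read/write commands address the namespace in logical blocks: cdw10 and
 * cdw11 carry the 64-bit starting LBA, the low 16 bits of cdw12 carry the
 * 0-based block count, and cdw12 bit 30 is the Force Unit Access flag. */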
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        req->busy = false;
        return r;
    }
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
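/* Buffers that are not page-aligned cannot be mapped as PRP entries directly,
 * so unaligned requests are bounced through a freshly allocated aligned
 * buffer and copied in or out around nvme_co_prw_aligned(). */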
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);
    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}
static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    qdict_del(opts, "filename");

    if (!qdict_size(opts)) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
                 bs->drv->format_name);
    }

    qdict_put_str(opts, "driver", bs->drv->format_name);
    bs->full_open_options = qobject_ref(opts);
}
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}
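/* Unplugging reverses nvme_aio_plug(): doorbell writes deferred while plugged
 * are issued with a single kick per I/O queue, then any completions that
 * arrived meanwhile are reaped. */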
static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (i = 1; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(s, q);
        nvme_process_completion(s, q);
        qemu_mutex_unlock(&q->lock);
    }
}
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}
static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};
static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);