/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "trace.h"

/* TODO: Move nvme spec definitions from hw/block/nvme.h into a separate file
 * that doesn't depend on dma/pci headers. */
#include "sysemu/dma.h"
#include "hw/pci/pci.h"
#include "hw/block/block.h"
#include "hw/block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192
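
/* The BAR mapping below covers 0x2000 bytes: one 4KiB page of controller
 * registers plus one 4KiB page for the doorbell array at offset 0x1000
 * (see the QEMU_BUILD_BUG_ON() after NVMeRegs below). */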
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    bool busy;
} NVMeRequest;

typedef struct {
    CoQueue   free_req_queue;
    QemuMutex lock;

    /* Fields protected by BQL */
    int      index;
    uint8_t  *prp_list_pages;

    /* Fields protected by @lock */
    NVMeQueue   sq, cq;
    int         cq_phase;
    NVMeRequest reqs[NVME_QUEUE_SIZE];
    bool        busy;
    int         need_kick;
    int         inflight;
} NVMeQueuePair;

/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specific[0x100];
    uint32_t doorbells[];
} QEMU_PACKED NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
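
/* Per the NVMe spec, the doorbell array starts at BAR offset 0x1000, with
 * (4 << CAP.DSTRD) bytes per doorbell.  Each queue pair uses two doorbells:
 * the submission queue tail doorbell followed by the completion queue head
 * doorbell; doorbell_scale below converts that stride into uint32_t array
 * elements for indexing doorbells[]. */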

typedef struct {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;
    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    uint64_t max_transfer;
    int plugged;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;
} BDRVNVMeState;

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
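
/* A sketch of how these options are typically supplied (the PCI address is
 * illustrative, and the device must already be bound to vfio-pci):
 *
 *     -drive driver=nvme,device=0000:44:00.0,namespace=1,if=none,id=drive0
 *
 * The nvme://0000:44:00.0/1 filename form parsed by nvme_parse_filename()
 * below expands to the same two options. */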

static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);

    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}

static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];

    return q;
fail:
    nvme_free_queue_pair(bs, q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
{
    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
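
/* The MMIO write above is the only point where the device learns about new
 * submission queue entries: the controller fetches commands up to (but not
 * including) the tail index written to the doorbell.  Deferring the kick
 * while s->plugged is set lets nvme_aio_unplug() batch several submissions
 * into a single doorbell write. */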

/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    int i;
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case (head
         * == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
            req = &q->reqs[i];
            break;
        }
    }
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    assert(req);
    qemu_mutex_unlock(&q->lock);
    return req;
}
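
/* With NVME_QUEUE_SIZE == 128 the check above caps outstanding commands at
 * 126: at least one ring slot must stay empty so that a full queue
 * (head == tail + 1) remains distinguishable from an empty one
 * (head == tail). */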

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;

    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
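
/* Bit 0 of the completion status word is the phase tag, so the shift above
 * extracts the 8-bit Status Code.  SC 0x0 is success, 0x1 is Invalid
 * Command Opcode, and 0x2 is Invalid Field in Command; all other codes
 * collapse to -EIO. */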

/* With q->lock */
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
{
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
    while (q->inflight) {
        int16_t cid;
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %d\n", cid);
            continue;
        }
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        preq->busy = false;
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
        c->cid = cpu_to_le16(0);
        q->inflight--;
        /* Flip Phase Tag bit. */
        c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        if (!qemu_co_queue_empty(&q->free_req_queue)) {
            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
        }
    }
    q->busy = false;
    return progress;
}
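
/* The phase tag is how new completions are detected without a consumer
 * index from the device: the controller inverts the phase bit it writes on
 * every pass over the CQ ring, so an entry whose phase bit still equals
 * q->cq_phase was not written during the current pass.  q->cq_phase flips
 * whenever q->cq.head wraps back to 0. */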

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        const uint8_t *cmdp = (const uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    /* cid is a 16-bit field, so cpu_to_le16() is the correct conversion. */
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(s, q);
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}
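
/* BDRV_POLL_WHILE() runs the AioContext event loop until the command
 * completes, so the completion is delivered either via the MSI-X event
 * notifier (nvme_handle_event) or via the polling callback (nvme_poll_cb);
 * no extra wakeup mechanism is needed for synchronous commands. */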

static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    uint8_t *resp;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = idctrl->vwc & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);

out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}
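
/* One page-aligned buffer serves both Identify subcommands: Identify
 * Controller (CNS 0x1, the cdw10 value set above) and Identify Namespace
 * (CNS 0x0) each return a 4096-byte structure, so the buffer is cleared
 * and the command reissued with cdw10/nsid adjusted. */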

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
            /* Keep polling until no progress */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    aio_context_acquire(s->aio_context);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
    aio_context_release(s->aio_context);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}
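
/* For both admin commands above, cdw10 encodes the zero-based queue size in
 * the upper half and the queue ID in the lower half.  For Create CQ,
 * cdw11 = 0x3 sets Physically Contiguous and Interrupts Enabled; for
 * Create SQ, cdw11 sets Physically Contiguous (bit 0) and names the
 * matching CQ in its upper half. */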

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
    return progress;
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto fail_notifier;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto fail_vfio;
    }

    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto fail_regs;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail_regs;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->nr_queues = 1;
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        g_free(s->queues);
        goto fail_regs;
    }
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000ULL;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail_queue;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto fail_queue;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto fail_handler;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
        goto fail_handler;
    }
    return 0;

fail_handler:
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
fail_queue:
    nvme_free_queue_pair(bs, s->queues[0]);
    g_free(s->queues);
fail_regs:
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
fail_vfio:
    qemu_vfio_close(s->vfio);
fail_notifier:
    event_notifier_cleanup(&s->irq_notifier);
    return ret;
}
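
/* Note on the enable sequence above: CC.IOSQES/CC.IOCQES hold log2 of the
 * entry sizes (ctz32(64) == 6 and ctz32(16) == 4), written together with
 * CC.EN only after AQA/ASQ/ACQ have been programmed with the admin queue
 * size and ring IOVAs, following the initialization order of NVMe spec
 * section 7.6.1. */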

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put(options, NVME_BLOCK_OPT_DEVICE,
                      qstring_from_str(tmp));
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put(options, NVME_BLOCK_OPT_DEVICE, qstring_from_str(device));
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put(options, NVME_BLOCK_OPT_NAMESPACE,
                  qstring_from_str(*namespace ? namespace : "1"));
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    return s->nsze << BDRV_SECTOR_BITS;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    return r;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = iova + j * s->page_size;
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(pagelist[1]);
        break;
    default:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(req->prp_list_iova);
        for (i = 0; i < entries - 1; ++i) {
            pagelist[i] = cpu_to_le64(pagelist[i + 1]);
        }
        pagelist[entries - 1] = 0;
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}
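
/* The switch above implements the NVMe PRP scheme: PRP1 always names the
 * first data page; with exactly two pages PRP2 names the second page
 * directly, and with more than two pages PRP2 points at a PRP list (the
 * request's pre-mapped prp_list_page) holding the remaining page addresses.
 * This is why nvme_identify() capped max_transfer at
 * page_size / sizeof(uint64_t) pages. */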

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        req->busy = false;
        return r;
    }
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
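
/* For NVM read/write commands, the 64-bit starting LBA is split across
 * cdw10 (low word) and cdw11 (high word), and cdw12 carries the zero-based
 * sector count in its low 16 bits; bit 30 of cdw12 is Force Unit Access,
 * which is how BDRV_REQ_FUA is honored without a separate flush. */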

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}
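
/* Unaligned requests take a bounce-buffer path: data is staged in a single
 * page-aligned allocation so nvme_cmd_map_qiov() can build a valid PRP
 * list, at the cost of one extra memory copy in each direction. */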

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static int64_t coroutine_fn nvme_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs;

    return BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}

static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    qdict_del(opts, "filename");

    if (!qdict_size(opts)) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
                 bs->drv->format_name);
    }

    qdict_put(opts, "driver", qstring_from_str(bs->drv->format_name));
    bs->full_open_options = opts;
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}

static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    s->plugged++;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    if (!--s->plugged) {
        for (i = 1; i < s->nr_queues; i++) {
            NVMeQueuePair *q = s->queues[i];
            qemu_mutex_lock(&q->lock);
            nvme_kick(s, q);
            nvme_process_completion(s, q);
            qemu_mutex_unlock(&q->lock);
        }
    }
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_co_get_block_status = nvme_co_get_block_status,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);