/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    bool busy;
} NVMeRequest;

typedef struct {
    CoQueue     free_req_queue;
    QemuMutex   lock;

    /* Fields protected by BQL */
    int         index;
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    NVMeQueue   sq, cq;
    int         cq_phase;
    NVMeRequest reqs[NVME_QUEUE_SIZE];
    bool        busy;
    int         need_kick;
    int         inflight;
} NVMeQueuePair;
/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specific[0x100];
    uint32_t doorbells[];
} QEMU_PACKED NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
typedef struct {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;
    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    uint64_t max_transfer;
    int plugged;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;
} BDRVNVMeState;
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}
static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}
static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];

    return q;
fail:
    nvme_free_queue_pair(bs, q);
    return NULL;
}
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
{
    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    int i;
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case (head
         * == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
            req = &q->reqs[i];
            break;
        }
    }
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    assert(req);
    qemu_mutex_unlock(&q->lock);
    return req;
}
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
{
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
    while (q->inflight) {
        int16_t cid;
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
                    cid);
            continue;
        }
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        preq->busy = false;
        preq->cb = preq->opaque = NULL;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
        c->cid = cpu_to_le16(0);
        q->inflight--;
        /* Flip Phase Tag bit. */
        c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        if (!qemu_co_queue_empty(&q->free_req_queue)) {
            aio_bh_schedule_oneshot(s->aio_context, nvme_free_req_queue_cb, q);
        }
    }
    q->busy = false;
    return progress;
}
static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}
static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le32(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(s, q);
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
}
static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    uint8_t *resp;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);
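    /*
     * Worked example (an aside, not from the original source): with a 4 KiB
     * page size one PRP list page holds 4096 / 8 = 512 entries, each
     * describing one 4 KiB data page, so transfers are capped at 2 MiB per
     * command even when the controller advertises a larger MDTS.
     */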

    memset(resp, 0, 4096);

    cmd.cdw10 = 0;
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);

out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}
static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}
static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    aio_context_acquire(s->aio_context);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
    aio_context_release(s->aio_context);
}
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
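    /*
     * A note on the command encoding below (per the NVMe spec, not part of
     * the original comments): for Create I/O CQ/SQ, CDW10 packs the
     * zero-based queue size into bits 31:16 and the queue id into bits 15:0.
     * For the CQ, CDW11 = 0x3 sets Physically Contiguous and Interrupts
     * Enabled; for the SQ, CDW11 sets Physically Contiguous and names the
     * completion queue id in bits 31:16.
     */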
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
    return progress;
}
static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto fail;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto fail;
    }

    /* Perform the initialization sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto fail;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
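    /*
     * Aside (not in the original source): CAP.DSTRD gives the doorbell
     * stride as 2^(2 + DSTRD) bytes, i.e. 4 << DSTRD. Dividing by
     * sizeof(uint32_t) yields how many uint32_t slots separate consecutive
     * doorbells; with the common DSTRD = 0 the scale is 1 and the doorbells
     * are packed 4 bytes apart.
     */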
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->nr_queues = 1;
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto fail;
    }
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
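    /*
     * Aside (not in the original source): CC.IOCQES/IOSQES take log2 of the
     * queue entry sizes, so ctz32(16) = 4 and ctz32(64) = 6 here, and the
     * low bit is CC.EN, which actually enables the controller.
     */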
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto fail_queue;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto fail_queue;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto fail_handler;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
        goto fail_handler;
    }
    return 0;

fail_handler:
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
fail_queue:
    nvme_free_queue_pair(bs, s->queues[0]);
fail:
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
    event_notifier_cleanup(&s->irq_notifier);
    return ret;
}
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
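/*
 * For illustration (not part of the original comments): with the device
 * bound to vfio-pci, such a filename would typically be passed as something
 * like "-drive file=nvme://0000:44:00.0/1,if=none,id=drive0", or spelled
 * out via the runtime options as driver=nvme,device=0000:44:00.0,namespace=1.
 */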
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put(options, NVME_BLOCK_OPT_DEVICE,
                      qstring_from_str(tmp));
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put(options, NVME_BLOCK_OPT_DEVICE, qstring_from_str(device));
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put(options, NVME_BLOCK_OPT_NAMESPACE,
                  qstring_from_str(*namespace ? namespace : "1"));
    }
}
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}
static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);
}
static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        return ret;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    return 0;
fail:
    nvme_close(bs);
    return ret;
}
static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    return s->nsze << BDRV_SECTOR_BITS;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = iova + j * s->page_size;
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
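    /*
     * Background (per the NVMe spec, not a comment from the original file):
     * PRP1 always points at the first data page. PRP2 is unused for a
     * one-page transfer, holds the second page address for a two-page
     * transfer, and otherwise points at a PRP list page containing the
     * remaining page addresses, which is what the default case builds below.
     */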
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(pagelist[1]);
        break;
    default:
        cmd->prp1 = cpu_to_le64(pagelist[0]);
        cmd->prp2 = cpu_to_le64(req->prp_list_iova);
        for (i = 0; i < entries - 1; ++i) {
            pagelist[i] = cpu_to_le64(pagelist[i + 1]);
        }
        pagelist[entries - 1] = 0;
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    aio_bh_schedule_oneshot(data->ctx, nvme_rw_cb_bh, data);
}
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
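    /*
     * Aside (spec background, not an original comment): the NVMe read/write
     * command takes a zero-based sector count in the low 16 bits of CDW12
     * (bit 30 is the FUA flag), while CDW10/CDW11 carry the low and high
     * halves of the 64-bit starting LBA.
     */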
    uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        req->busy = false;
        return r;
    }
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);
    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}
static int64_t coroutine_fn nvme_co_get_block_status(BlockDriverState *bs,
                                                     int64_t sector_num,
                                                     int nb_sectors, int *pnum,
                                                     BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs;

    return BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}
static void nvme_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    qdict_del(opts, "filename");

    if (!qdict_size(opts)) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename), "%s://",
                 bs->drv->format_name);
    }

    qdict_put(opts, "driver", qstring_from_str(bs->drv->format_name));
    bs->full_open_options = opts;
}
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}
static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    s->plugged++;
}
static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    assert(s->plugged);
    if (!--s->plugged) {
        for (i = 1; i < s->nr_queues; i++) {
            NVMeQueuePair *q = s->queues[i];
            qemu_mutex_lock(&q->lock);
            nvme_kick(s, q);
            nvme_process_completion(s, q);
            qemu_mutex_unlock(&q->lock);
        }
    }
}
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}
static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_co_get_block_status = nvme_co_get_block_status,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};
static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);