/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"
#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192
/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
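/*
 * Worked example: with NVME_QUEUE_SIZE 128, a ring is empty when
 * head == tail and full when head == (tail + 1) % 128, so at most
 * 127 requests (NVME_NUM_REQS) can be outstanding at any time.
 */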
typedef struct BDRVNVMeState BDRVNVMeState;
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;
typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;
typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;
/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t  reserved1[0xec0];
    uint8_t  cmd_set_specific[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
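/*
 * Per the NVMe specification, the doorbell registers start at BAR0
 * offset 0x1000; the build-time assertion above pins this C struct
 * layout to that hardware layout.
 */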
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + (n))
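/*
 * Example: s->queues[INDEX_ADMIN] is the admin queue pair, and
 * s->queues[INDEX_IO(0)] (i.e. s->queues[1]) is the first I/O queue
 * pair, which this driver uses for all reads, writes and discards.
 */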
/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT
};
struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;
};
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);
static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};
static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(s->page_size, bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}
static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    Error *local_err = NULL;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        return NULL;
    }
    q->prp_list_pages = qemu_try_memalign(s->page_size,
                                          s->page_size * NVME_NUM_REQS);
    if (!q->prp_list_pages) {
        goto fail;
    }
    memset(q->prp_list_pages, 0, s->page_size * NVME_NUM_REQS);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_NUM_REQS,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}
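/*
 * Doorbell layout, for reference: queue pair idx owns two doorbells,
 * the SQ tail at byte offset 0x1000 + idx * 2 * stride and the CQ head
 * at 0x1000 + (idx * 2 + 1) * stride, where stride = 4 << CAP.DSTRD.
 * doorbell_scale is that stride expressed in uint32_t elements, which
 * is why the indexes above are multiplied by it.
 */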
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}
/* Find a free request element if any, otherwise:
 * a) if in coroutine context, try to wait for one to become available;
 * b) if not in coroutine, return NULL;
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    NVMeRequest *req;

    qemu_mutex_lock(&q->lock);

    while (q->free_req_head == -1) {
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;

    qemu_mutex_unlock(&q->lock);
    return req;
}
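/*
 * The free list is a singly linked LIFO threaded through
 * NVMeRequest.free_req_next using q->reqs[] indexes (-1 terminates
 * it), so allocation above and release below are O(1) and need no
 * memory beyond the fixed reqs[] array.
 */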
/* With q->lock */
static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

/* With q->lock */
static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}
static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (s->plugged) {
        trace_nvme_process_completion_queue_plugged(s, q->index);
        return false;
    }

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        uint32_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %" PRIu32 "\n",
                    cid);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}
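/*
 * Phase bit recap: the controller toggles bit 0 of CQE status each
 * time it wraps around the completion ring, and q->cq_phase tracks
 * the value that means "stale". A CQE is new precisely when its phase
 * bit differs from q->cq_phase, which is what the loop above and
 * nvme_poll_queue() below test.
 */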
static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        const uint8_t *cmdp = (const uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(q);
    nvme_process_completion(q);
    qemu_mutex_unlock(&q->lock);
}
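/*
 * Note the doorbell batching: nvme_kick() is a no-op while s->plugged
 * is set, so under bdrv_io_plug()/bdrv_io_unplug() several commands
 * can be copied into the submission ring while q->need_kick
 * accumulates; nvme_aio_unplug() then publishes them all with a
 * single SQ tail doorbell write.
 */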
static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}
static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };

    id = qemu_try_memalign(s->page_size, sizeof(*id));
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }

    memset(id, 0, sizeof(*id));
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                          s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, sizeof(*id));
    cmd.cdw10 = cpu_to_le32(0x0);
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);
    qemu_vfree(id);
}
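/*
 * The two Identify calls above differ only in CDW10.CNS: 0x1 returns
 * the controller data structure, while 0x0 together with cmd.nsid
 * returns the namespace data structure, which is why one buffer and
 * one NvmeCmd are reused for both.
 */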
static bool nvme_poll_queue(NVMeQueuePair *q)
{
    bool progress = false;

    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return false;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep polling */
        progress = true;
    }
    qemu_mutex_unlock(&q->lock);

    return progress;
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        if (nvme_poll_queue(s->queues[i])) {
            progress = true;
        }
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%d]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%d]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}
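/*
 * Encoding used above, per the NVMe admin command set: CDW10 packs the
 * 0-based queue size into bits 31:16 and the queue id into bits 15:0;
 * for Create CQ, CDW11 0x3 sets Physically Contiguous and Interrupts
 * Enabled; for Create SQ, CDW11 packs the matching CQ id into bits
 * 31:16, with bit 0 again meaning Physically Contiguous.
 */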
static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_poll_cb(s);
    return nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto out;
    }

    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
                                                    NVME_QUEUE_SIZE,
                                                    errp);
    if (!s->queues[INDEX_ADMIN]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

    /* After setting up all control registers we can enable device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRIu64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}
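/*
 * CAP fields decoded above: CSS bit 37 advertises the NVM command set,
 * MPSMIN (bits 51:48) gives the minimum page size as a 2^(12+n)
 * exponent, DSTRD (bits 35:32) gives the doorbell stride, and TO
 * (bits 31:24) is the worst-case enable/disable time in 500 ms units,
 * e.g. TO = 3 yields a 1500 ms deadline for both CSTS.RDY waits.
 */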
/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}
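/*
 * Example invocation (illustrative only; the PCI address must name a
 * host device already bound to vfio-pci):
 *
 *     qemu-system-x86_64 -drive file=nvme://0000:44:00.0/1,if=none,id=d0
 *
 * which nvme_parse_filename() turns into the runtime options
 * {"device": "0000:44:00.0", "namespace": "1"}.
 */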
static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}
static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}
static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}
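/*
 * PRP recap for the switch above: an NVMe command carries at most two
 * raw PRP entries. One page fits entirely in prp1; two pages use prp1
 * and prp2 directly; anything longer stores the remaining entries in
 * this request's PRP list page, with prp2 pointing at that list
 * (skipping entry 0, which already went into prp1).
 */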
typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                       (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}
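/*
 * Command layout used above: CDW10/CDW11 hold the low/high halves of
 * the starting LBA (offset >> blkshift), and CDW12 bits 15:0 hold the
 * 0-based block count, with bit 30 set for Force Unit Access when the
 * block layer passes BDRV_REQ_FUA.
 */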
static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(s->page_size, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}
static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}
static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}
static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset,
                                              int bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}
static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeDsmRange *buf;
    QEMUIOVector local_qiov;
    int ret;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->nr_queues > 1);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    qemu_vfree(buf);
    return ret;
}
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}
static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}
static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (int i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           false, nvme_handle_event, nvme_poll_cb);

    for (int i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (i = INDEX_IO(0); i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(q);
        nvme_process_completion(q);
        qemu_mutex_unlock(&q->lock);
    }
}
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};
static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};
static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);