/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/memalign.h"
#include "qemu/vfio-helpers.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_DOORBELL_SIZE 4096

/*
 * We have to leave one slot empty as that is the full queue case where
 * head == tail + 1.
 */
#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

typedef struct BDRVNVMeState BDRVNVMeState;

/* Same index is used for queues and IRQs */
#define INDEX_ADMIN     0
#define INDEX_IO(n)     (1 + n)

/* This driver shares a single MSIX IRQ for the admin and I/O queues */
enum {
    MSIX_SHARED_IRQ_IDX = 0,
    MSIX_IRQ_COUNT
};

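/*
 * NVMeQueue describes one hardware queue as seen from the host: the ring
 * buffer in QEMU memory, its IOVA as mapped through vfio, the head/tail
 * indices, and a pointer to the queue's MMIO doorbell register.
 */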
typedef struct {
    int32_t  head, tail;
    uint8_t  *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    int free_req_next; /* q->reqs[] index of next free req */
} NVMeRequest;

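/*
 * Request slots double as command identifiers: reqs[i] is submitted with
 * CID i + 1 (CID 0 is never used), so nvme_process_completion() can map a
 * completion's CID straight back to &q->reqs[cid - 1].
 */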
typedef struct {
    QemuMutex   lock;

    /* Read from I/O code path, initialized under BQL */
    BDRVNVMeState   *s;
    int             index;

    /* Fields protected by BQL */
    uint8_t     *prp_list_pages;

    /* Fields protected by @lock */
    CoQueue     free_req_queue;
    NVMeQueue   sq, cq;
    int         cq_phase;
    int         free_req_head;
    NVMeRequest reqs[NVME_NUM_REQS];
    int         need_kick;
    int         inflight;

    /* Thread-safe, no lock necessary */
    QEMUBH      *completion_bh;
} NVMeQueuePair;

struct BDRVNVMeState {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    void *bar0_wo_map;
    /* Memory mapped registers */
    volatile struct {
        uint32_t sq_tail;
        uint32_t cq_head;
    } *doorbells;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    unsigned queue_count;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier[MSIX_IRQ_COUNT];

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;

    bool supports_write_zeroes;
    bool supports_discard;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;

    struct {
        uint64_t completion_errors;
        uint64_t aligned_accesses;
        uint64_t unaligned_accesses;
    } stats;
};

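/*
 * The doorbell array lives in BAR0 immediately after the NvmeBar register
 * block; nvme_init() maps that region separately (write-only, hence the
 * bar0_wo_map name) and each queue pair indexes it with idx * doorbell_scale.
 */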
#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static void nvme_process_completion_bh(void *opaque);

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

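/*
 * Typical usage (illustrative command line, not taken from this file):
 *   -blockdev driver=nvme,node-name=nvme0,device=0000:44:00.0,namespace=1
 * or the filename form nvme://0000:44:00.0/1 parsed by nvme_parse_filename().
 */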
/* Returns true on success, false on failure. */
static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
                            unsigned nentries, size_t entry_bytes, Error **errp)
{
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
    q->head = q->tail = 0;
    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return false;
    }
    memset(q->queue, 0, bytes);
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

static void nvme_free_queue(NVMeQueue *q)
{
    qemu_vfree(q->queue);
}

static void nvme_free_queue_pair(NVMeQueuePair *q)
{
    trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq);
    if (q->completion_bh) {
        qemu_bh_delete(q->completion_bh);
    }
    nvme_free_queue(&q->sq);
    nvme_free_queue(&q->cq);
    qemu_vfree(q->prp_list_pages);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (q->free_req_head != -1 &&
           qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry waiting requests */
    }
    qemu_mutex_unlock(&q->lock);
}

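/*
 * Allocate one queue pair: the SQ/CQ rings, the request freelist, and one
 * page of PRP list space per request, mapped once for DMA so that I/O
 * submission never has to map it again.
 */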
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
                                             AioContext *aio_context,
                                             unsigned idx, size_t size,
                                             Error **errp)
{
    int i, r;
    NVMeQueuePair *q;
    uint64_t prp_list_iova;
    size_t bytes;

    q = g_try_new0(NVMeQueuePair, 1);
    if (!q) {
        error_setg(errp, "Cannot allocate queue pair");
        return NULL;
    }
    trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                 event_notifier_get_fd(s->irq_notifier));
    bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
                          qemu_real_host_page_size());
    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
    if (!q->prp_list_pages) {
        error_setg(errp, "Cannot allocate PRP page list");
        goto fail;
    }
    memset(q->prp_list_pages, 0, bytes);
    qemu_mutex_init(&q->lock);
    q->s = s;
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
                          false, &prp_list_iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto fail;
    }
    q->free_req_head = -1;
    for (i = 0; i < NVME_NUM_REQS; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->free_req_next = q->free_req_head;
        q->free_req_head = i;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }

    if (!nvme_init_queue(s, &q->sq, size, NVME_SQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

    if (!nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, errp)) {
        goto fail;
    }
    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

    return q;
fail:
    nvme_free_queue_pair(q);
    return NULL;
}

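/*
 * Ring the submission queue doorbell for any entries queued since the last
 * kick. The SQE writes must be globally visible before the new tail value
 * reaches the doorbell, hence the write barrier in front of it.
 */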
static void nvme_kick(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;

    if (!q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
{
    NVMeRequest *req;

    req = &q->reqs[q->free_req_head];
    q->free_req_head = req->free_req_next;
    req->free_req_next = -1;
    return req;
}

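/*
 * The request freelist is a singly linked list threaded through
 * NVMeRequest.free_req_next, with q->free_req_head as the list head and -1
 * as the end-of-list marker.
 */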
/* Return a free request element if any, otherwise return NULL. */
static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);
    if (q->free_req_head == -1) {
        return NULL;
    }
    return nvme_get_free_req_nofail_locked(q);
}

/*
 * Wait for a free request to become available if necessary, then
 * allocate it.
 */
static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    QEMU_LOCK_GUARD(&q->lock);

    while (q->free_req_head == -1) {
        trace_nvme_free_req_queue_wait(q->s, q->index);
        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
    }

    return nvme_get_free_req_nofail_locked(q);
}

static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
{
    req->free_req_next = q->free_req_head;
    q->free_req_head = req - q->reqs;
}

static void nvme_wake_free_req_locked(NVMeQueuePair *q)
{
    if (!qemu_co_queue_empty(&q->free_req_queue)) {
        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                         nvme_free_req_queue_cb, q);
    }
}

/* Insert a request in the freelist and wake waiters */
static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
{
    qemu_mutex_lock(&q->lock);
    nvme_put_free_req_locked(q, req);
    nvme_wake_free_req_locked(q);
    qemu_mutex_unlock(&q->lock);
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         le16_to_cpu(status));
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}

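/*
 * Completion queue entries carry a phase tag in bit 0 of the status field.
 * The controller inverts the tag on every pass over the queue, so an entry
 * whose phase tag still equals q->cq_phase has not been posted yet; once the
 * head wraps around, q->cq_phase is flipped to track the next pass.
 */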
static bool nvme_process_completion(NVMeQueuePair *q)
{
    BDRVNVMeState *s = q->s;
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);

    /*
     * Support re-entrancy when a request cb() function invokes aio_poll().
     * Pending completions must be visible to aio_poll() so that a cb()
     * function can wait for the completion of another request.
     *
     * The aio_poll() loop will execute our BH and we'll resume completion
     * processing there.
     */
    qemu_bh_schedule(q->completion_bh);

    assert(q->inflight >= 0);
    while (q->inflight) {
        int ret;
        int16_t cid;

        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        ret = nvme_translate_error(c);
        if (ret) {
            s->stats.completion_errors++;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", "
                        "queue size: %u", cid, NVME_QUEUE_SIZE);
            continue;
        }
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        nvme_put_free_req_locked(q, preq);
        preq->cb = preq->opaque = NULL;
        q->inflight--;
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, ret);
        qemu_mutex_lock(&q->lock);
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        nvme_wake_free_req_locked(q);
    }

    qemu_bh_cancel(q->completion_bh);

    return progress;
}

static void nvme_process_completion_bh(void *opaque)
{
    NVMeQueuePair *q = opaque;

    /*
     * We're being invoked because a nvme_process_completion() cb() function
     * called aio_poll(). The callback may be waiting for further completions
     * so notify the device that it has space to fill in more completions now.
     */
    smp_mb_release();
    *q->cq.doorbell = cpu_to_le32(q->cq.head);
    nvme_wake_free_req_locked(q);

    nvme_process_completion(q);
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    if (!trace_event_get_state_backends(TRACE_NVME_SUBMIT_COMMAND_RAW)) {
        return;
    }
    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_unplug_fn(void *opaque)
{
    NVMeQueuePair *q = opaque;

    QEMU_LOCK_GUARD(&q->lock);
    nvme_kick(q);
    nvme_process_completion(q);
}

static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(q->s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    blk_io_plug_call(nvme_unplug_fn, q);
    qemu_mutex_unlock(&q->lock);
}

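/*
 * Submission is funneled through blk_io_plug_call(): while a plug section is
 * active the unplug callback is deferred, so several commands queued back to
 * back can be kicked by a single nvme_unplug_fn() -> nvme_kick() call instead
 * of one doorbell write per command.
 */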
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q = s->queues[INDEX_ADMIN];
    AioContext *aio_context = bdrv_get_aio_context(bs);
    NVMeRequest *req;
    int ret = -EINPROGRESS;

    req = nvme_get_free_req_nowait(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(q, req, cmd, nvme_admin_cmd_sync_cb, &ret);

    AIO_WAIT_WHILE(aio_context, ret == -EINPROGRESS);
    return ret;
}

/* Returns true on success, false on failure. */
static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    bool ret = false;
    QEMU_AUTO_VFREE union {
        NvmeIdCtrl ctrl;
        NvmeIdNs ns;
    } *id = NULL;
    NvmeLBAF *lbaf;
    uint16_t oncs;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1),
    };
    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());

    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
    if (!id) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
    if (r) {
        error_prepend(errp, "Cannot map buffer for DMA: ");
        goto out;
    }

    memset(id, 0, id_size);
    cmd.dptr.prp1 = cpu_to_le64(iova);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(id->ctrl.nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(id->ctrl.vwc) & 0x1;
    s->max_transfer = (id->ctrl.mdts ? 1 << id->ctrl.mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    oncs = le16_to_cpu(id->ctrl.oncs);
    s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES);
    s->supports_discard = !!(oncs & NVME_ONCS_DSM);

    memset(id, 0, id_size);
    cmd.cdw10 = cpu_to_le32(0x0);
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(id->ns.nsze);
    lbaf = &id->ns.lbaf[NVME_ID_NS_FLBAS_INDEX(id->ns.flbas)];

    if (NVME_ID_NS_DLFEAT_WRITE_ZEROES(id->ns.dlfeat) &&
        NVME_ID_NS_DLFEAT_READ_BEHAVIOR(id->ns.dlfeat) ==
            NVME_ID_NS_DLFEAT_READ_BEHAVIOR_ZEROES) {
        bs->supported_write_flags |= BDRV_REQ_MAY_UNMAP;
    }

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size) {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    ret = true;
    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, id);

    return ret;
}

static void nvme_poll_queue(NVMeQueuePair *q)
{
    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

    trace_nvme_poll_queue(q->s, q->index);
    /*
     * Do an early check for completions. q->lock isn't needed because
     * nvme_process_completion() only runs in the event loop thread and
     * cannot race with itself.
     */
    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
        return;
    }

    qemu_mutex_lock(&q->lock);
    while (nvme_process_completion(q)) {
        /* Keep processing completions */
    }
    qemu_mutex_unlock(&q->lock);
}

static void nvme_poll_queues(BDRVNVMeState *s)
{
    int i;

    for (i = 0; i < s->queue_count; i++) {
        nvme_poll_queue(s->queues[i]);
    }
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    unsigned n = s->queue_count;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    unsigned queue_size = NVME_QUEUE_SIZE;

    assert(n <= UINT16_MAX);
    q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
                               n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .dptr.prp1 = cpu_to_le64(q->cq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_CQ_IEN | NVME_CQ_PC),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create CQ io queue [%u]", n);
        goto out_error;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .dptr.prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | n),
        .cdw11 = cpu_to_le32(NVME_SQ_PC | (n << 16)),
    };
    if (nvme_admin_cmd_sync(bs, &cmd)) {
        error_setg(errp, "Failed to create SQ io queue [%u]", n);
        goto out_error;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->queue_count++;
    return true;
out_error:
    nvme_free_queue_pair(q);
    return false;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
    int i;

    for (i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];
        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];

        /*
         * q->lock isn't needed because nvme_process_completion() only runs in
         * the event loop thread and cannot race with itself.
         */
        if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) {
            return true;
        }
    }
    return false;
}

static void nvme_poll_ready(EventNotifier *e)
{
    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);

    nvme_poll_queues(s);
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *q;
    AioContext *aio_context = bdrv_get_aio_context(bs);
    int ret;
    uint64_t cap;
    uint32_t ver;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    volatile NvmeBar *regs = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                 PROT_READ | PROT_WRITE, errp);
    if (!regs) {
        ret = -EINVAL;
        goto out;
    }
    /* Perform initialize sequence as described in NVMe spec "7.6.1
     * Initialization". */

    cap = le64_to_cpu(regs->cap);
    trace_nvme_controller_capability_raw(cap);
    trace_nvme_controller_capability("Maximum Queue Entries Supported",
                                     1 + NVME_CAP_MQES(cap));
    trace_nvme_controller_capability("Contiguous Queues Required",
                                     NVME_CAP_CQR(cap));
    trace_nvme_controller_capability("Doorbell Stride",
                                     1 << (2 + NVME_CAP_DSTRD(cap)));
    trace_nvme_controller_capability("Subsystem Reset Supported",
                                     NVME_CAP_NSSRS(cap));
    trace_nvme_controller_capability("Memory Page Size Minimum",
                                     1 << (12 + NVME_CAP_MPSMIN(cap)));
    trace_nvme_controller_capability("Memory Page Size Maximum",
                                     1 << (12 + NVME_CAP_MPSMAX(cap)));
    if (!NVME_CAP_CSS(cap)) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

    ver = le32_to_cpu(regs->vs);
    trace_nvme_controller_spec_version(extract32(ver, 16, 16),
                                       extract32(ver, 8, 8),
                                       extract32(ver, 0, 8));

    /* Reset device to get a clean state. */
    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRIu64 " ms)", timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    s->bar0_wo_map = qemu_vfio_pci_map_bar(s->vfio, 0, 0,
                                           sizeof(NvmeBar) + NVME_DOORBELL_SIZE,
                                           PROT_WRITE, errp);
    s->doorbells = (void *)((uintptr_t)s->bar0_wo_map + sizeof(NvmeBar));
    if (!s->doorbells) {
        ret = -EINVAL;
        goto out;
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    q = nvme_create_queue_pair(s, aio_context, 0, NVME_QUEUE_SIZE, errp);
    if (!q) {
        ret = -EINVAL;
        goto out;
    }
    s->queues[INDEX_ADMIN] = q;
    s->queue_count = 1;
    QEMU_BUILD_BUG_ON((NVME_QUEUE_SIZE - 1) & 0xF000);
    regs->aqa = cpu_to_le32(((NVME_QUEUE_SIZE - 1) << AQA_ACQS_SHIFT) |
                            ((NVME_QUEUE_SIZE - 1) << AQA_ASQS_SHIFT));
    regs->asq = cpu_to_le64(q->sq.iova);
    regs->acq = cpu_to_le64(q->cq.iova);

    /* After setting up all control registers we can enable device now. */
    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
                           CC_EN_MASK);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * SCALE_MS;
    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRIu64 " ms)", timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           nvme_handle_event, nvme_poll_cb,
                           nvme_poll_ready);

    if (!nvme_identify(bs, namespace, errp)) {
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
        goto out;
    }
out:
    if (regs) {
        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
    }

    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where the "nvme://" is a fixed form of the protocol prefix, the middle part
 * is the PCI address, and the last part is the namespace number starting from
 * 1 according to the NVMe spec. */
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06),
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_admin_cmd_sync(bs, &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; ++i) {
        nvme_free_queue_pair(s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           NULL, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map,
                            0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    bs->supported_write_flags = BDRV_REQ_FUA;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
            goto fail;
        }
        ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                              errp);
        if (ret) {
            goto fail;
        }
    }
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t coroutine_fn nvme_co_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }

    return r;
}

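/*
 * NVMe PRP rules, which nvme_cmd_map_qiov() follows when filling in the data
 * pointer below: a single page goes in PRP1 alone, exactly two pages go in
 * PRP1 and PRP2 directly, and anything longer puts the first page in PRP1
 * while PRP2 points at a PRP list holding the remaining entries.
 */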
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;
    Error *local_err = NULL, **errp = NULL;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
                                   qemu_real_host_page_size());
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              len, true, &iova, errp);
        if (r == -ENOSPC) {
            /*
             * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
             * ioctl returns -ENOSPC to signal the user exhausted the DMA
             * mappings available for a container since Linux kernel commit
             * 492855939bdb ("vfio/type1: Limit DMA mappings per container",
             * April 2019, see CVE-2019-3882).
             *
             * This block driver already handles this error path by checking
             * for the -ENOMEM error, so we directly replace -ENOSPC by
             * -ENOMEM. Beside, -ENOSPC has a specific meaning for blockdev
             * coroutines: it triggers BLOCKDEV_ON_ERROR_ENOSPC and
             * BLOCK_ERROR_ACTION_STOP which stops the VM, asking the operator
             * to add more storage to the blockdev. Not something we can do
             * easily with an IOMMU :)
             */
            r = -ENOMEM;
        }
        if (r == -ENOMEM && retry) {
            /*
             * We exhausted the DMA mappings available for our container:
             * recycle the volatile IOVA mappings.
             */
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            errp = &local_err;

            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = 0;
        break;
    case 2:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = pagelist[1];
        break;
    default:
        cmd->dptr.prp1 = pagelist[0];
        cmd->dptr.prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    if (local_err) {
        error_reportf_err(local_err, "Cannot map buffer for DMA: ");
    }
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    } else {
        replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
    }
}

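/*
 * NVMe read/write commands encode the starting LBA as a 64-bit value split
 * across CDW10 (low 32 bits) and CDW11 (high 32 bits); CDW12 carries the
 * 0's-based number of logical blocks in its low 16 bits, with bit 30 acting
 * as the FUA flag.
 */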
static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        nvme_put_free_req_and_wake(ioq, req);
        return r;
    }
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
                                 qemu_real_host_page_size()) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

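/*
 * When the guest buffers are not page-aligned they cannot be mapped for DMA
 * directly, so nvme_co_prw() falls back to a page-aligned bounce buffer:
 * writes are copied into it before submission and reads are copied back out
 * once the command completes.
 */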
static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
                                    uint64_t offset, uint64_t bytes,
                                    QEMUIOVector *qiov, bool is_write,
                                    int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    QEMU_AUTO_VFREE uint8_t *buf = NULL;
    QEMUIOVector local_qiov;
    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        s->stats.aligned_accesses++;
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    s->stats.unaligned_accesses++;
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_memalign(qemu_real_host_page_size(), len);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       int64_t offset, int64_t bytes,
                                       QEMUIOVector *qiov,
                                       BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        int64_t offset, int64_t bytes,
                                        QEMUIOVector *qiov,
                                        BdrvRequestFlags flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes,
                                              BdrvRequestFlags flags)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    uint32_t cdw12;

    if (!s->supports_write_zeroes) {
        return -ENOTSUP;
    }

    if (bytes == 0) {
        return 0;
    }

    cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
    /*
     * We should not lose information. pwrite_zeroes_alignment and
     * max_pwrite_zeroes guarantees it.
     */
    assert(((cdw12 + 1) << s->blkshift) == bytes);

    NvmeCmd cmd = {
        .opcode = NVME_CMD_WRITE_ZEROES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (flags & BDRV_REQ_MAY_UNMAP) {
        cdw12 |= (1 << 25);
    }

    if (flags & BDRV_REQ_FUA) {
        cdw12 |= (1 << 30);
    }

    cmd.cdw12 = cpu_to_le32(cdw12);

    trace_nvme_write_zeroes(s, offset, bytes, flags);
    assert(s->queue_count > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    trace_nvme_rw_done(s, true, offset, bytes, data.ret);
    return data.ret;
}

static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
                                         int64_t offset,
                                         int64_t bytes)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
    NVMeRequest *req;
    int ret;
    QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL;
    QEMUIOVector local_qiov;

    NvmeCmd cmd = {
        .opcode = NVME_CMD_DSM,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0), /*number of ranges - 0 based*/
        .cdw11 = cpu_to_le32(1 << 2), /*deallocate bit*/
    };

    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    if (!s->supports_discard) {
        return -ENOTSUP;
    }

    assert(s->queue_count > 1);

    /*
     * Filling the @buf requires @offset and @bytes to satisfy restrictions
     * defined in nvme_refresh_limits().
     */
    assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
    assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
    assert((bytes >> s->blkshift) <= UINT32_MAX);

    buf = qemu_try_memalign(s->page_size, s->page_size);
    if (!buf) {
        return -ENOMEM;
    }
    memset(buf, 0, s->page_size);
    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
    buf->slba = cpu_to_le64(offset >> s->blkshift);
    buf->cattr = 0;

    qemu_iovec_init(&local_qiov, 1);
    qemu_iovec_add(&local_qiov, buf, 4096);

    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        nvme_put_free_req_and_wake(ioq, req);
        goto out;
    }

    trace_nvme_dsm(s, offset, bytes);

    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    ret = nvme_cmd_unmap_qiov(bs, &local_qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);

    if (ret) {
        goto out;
    }

    ret = data.ret;
    trace_nvme_dsm_done(s, offset, bytes, ret);
out:
    qemu_iovec_destroy(&local_qiov);
    return ret;
}

static int coroutine_fn nvme_co_truncate(BlockDriverState *bs, int64_t offset,
                                         bool exact, PreallocMode prealloc,
                                         BdrvRequestFlags flags, Error **errp)
{
    int64_t cur_length;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    cur_length = nvme_co_getlength(bs);
    if (offset != cur_length && exact) {
        error_setg(errp, "Cannot resize NVMe devices");
        return -ENOTSUP;
    } else if (offset > cur_length) {
        error_setg(errp, "Cannot grow NVMe devices");
        return -EINVAL;
    }

    return 0;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;

    /*
     * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
     * at most 0xFFFF
     */
    bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
    bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
                                         1UL << s->blkshift);

    bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
    bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
                                    1UL << s->blkshift);
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        qemu_bh_delete(q->completion_bh);
        q->completion_bh = NULL;
    }

    aio_set_event_notifier(bdrv_get_aio_context(bs),
                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           NULL, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                           nvme_handle_event, nvme_poll_cb,
                           nvme_poll_ready);

    for (unsigned i = 0; i < s->queue_count; i++) {
        NVMeQueuePair *q = s->queues[i];

        q->completion_bh =
            aio_bh_new(new_context, nvme_process_completion_bh, q);
    }
}

static bool nvme_register_buf(BlockDriverState *bs, void *host, size_t size,
                              Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    /*
     * FIXME: we may run out of IOVA addresses after repeated
     * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
     * doesn't reclaim addresses for fixed mappings.
     */
    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, errp);
    return ret == 0;
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static BlockStatsSpecific *nvme_get_specific_stats(BlockDriverState *bs)
{
    BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
    BDRVNVMeState *s = bs->opaque;

    stats->driver = BLOCKDEV_DRIVER_NVME;
    stats->u.nvme = (BlockStatsSpecificNvme) {
        .completion_errors = s->stats.completion_errors,
        .aligned_accesses = s->stats.aligned_accesses,
        .unaligned_accesses = s->stats.unaligned_accesses,
    };

    return stats;
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_co_create_opts      = bdrv_co_create_opts_simple,
    .create_opts              = &bdrv_create_opts_simple,

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_co_getlength        = nvme_co_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,
    .bdrv_co_truncate         = nvme_co_truncate,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,

    .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
    .bdrv_co_pdiscard         = nvme_co_pdiscard,

    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,
    .bdrv_get_specific_stats  = nvme_get_specific_stats,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);