/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "qapi/error.h"
/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);

    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}
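
/*
 * Decide whether a MemoryRegionSection must be ignored by the listener:
 * sections that are neither RAM nor IOMMU-backed, protected regions,
 * RAM-device (MMIO) regions, and sections outside the device's usable
 * IOVA range are all skipped.
 */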
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                hwaddr iova_min,
                                                hwaddr iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }
    /*
     * While using vIOMMU, sometimes the section will be larger than iova_max,
     * but the memory that actually maps is smaller, so move the check to
     * function vhost_vdpa_iommu_map_notify(). That function will use the
     * actual size that maps to the kernel.
     */

    if (!memory_region_is_iommu(section->mr)) {
        llend = vhost_vdpa_section_end(section);
        if (int128_gt(llend, int128_make64(iova_max))) {
            error_report("RAM section out of device range (max=0x%" PRIx64
                         ", end addr=0x%" PRIx64 ")",
                         iova_max, int128_get64(llend));
            return true;
        }
    }

    return false;
}
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
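
/*
 * Illustrative sketch only (the real callers are the memory listener
 * callbacks below): a mapping transaction typically looks like
 *
 *     vhost_vdpa_iotlb_batch_begin_once(v);
 *     vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, size, vaddr, ro);
 *     ...
 *     vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, size);
 *
 * with the batch closed later from vhost_vdpa_listener_commit().
 */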
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}
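
/*
 * IOTLB updates are batched when the backend advertises
 * VHOST_BACKEND_F_IOTLB_BATCH: a BATCH_BEGIN message is sent lazily before
 * the first UPDATE/INVALIDATE of a transaction, and the matching BATCH_END
 * is emitted from the listener commit callback below.
 */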
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}
static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);

    hwaddr iova = iotlb->iova + iommu->iommu_offset;
    struct vhost_vdpa *v = iommu->dev;
    void *vaddr;
    int ret;
    Int128 llend;

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }
    RCU_READ_LOCK_GUARD();
    /* check if RAM section out of device range */
    llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
    if (int128_gt(llend, int128_make64(v->iova_range.last))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     v->iova_range.last, int128_get64(llend));
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
            return;
        }
        ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                 iotlb->addr_mask + 1, vaddr, read_only);
        if (ret) {
            error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   iotlb->addr_mask + 1);
        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, ret);
        }
    }
}
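
/*
 * vIOMMU support: when a section is backed by an IOMMU memory region, the
 * listener does not map it directly. Instead it registers an IOMMU notifier
 * for the section, and guest translations are pushed to the device from
 * vhost_vdpa_iommu_map_notify() above as they change.
 */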
static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu->iommu_mr = iommu_mr;
    iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
                        IOMMU_NOTIFIER_IOTLB_EVENTS,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->dev = v;

    ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
    if (ret) {
        g_free(iommu);
        return;
    }

    QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
    memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
}
static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;

    QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
    {
        if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}
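
/*
 * RAM sections are mapped into the device as they are added to the address
 * space. With shadow virtqueues enabled (v->shadow_data), the guest physical
 * address is first translated to a device IOVA allocated from v->iova_tree
 * before being programmed with vhost_vdpa_dma_map().
 */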
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_add(listener, section);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_del(listener, section);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova,
        int128_get64(int128_sub(llend, int128_one())));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
                            section->offset_within_region +
                            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    /*
     * The unmap ioctl doesn't accept a full 64-bit span, so check for that
     * case and split the unmap in two.
     */
    if (int128_eq(llsize, int128_2_64())) {
        llsize = int128_rshift(llsize, 1);
        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   int128_get64(llsize));

        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, int128_get64(llsize), ret);
        }
        iova += int128_get64(llsize);
    }
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));

    if (ret) {
        error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                     "0x%" HWADDR_PRIx ") = %d (%m)",
                     v, iova, int128_get64(llsize), ret);
    }

    memory_region_unref(section->mr);
}
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
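
/*
 * Thin ioctl wrapper for the vhost-vdpa device fd: as elsewhere in the vhost
 * backends, failures are reported as negative errno values.
 */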
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}
static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}
/*
 * This function is intended for requests that only need to be applied once.
 * Typically such a request occurs at the beginning of operation, before the
 * queues are set up. It should not be used for requests that must wait until
 * all queues are set, which would need to check dev->vq_index_end instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}
static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);

    return ret;
}
static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}
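
/*
 * Backend init: wire up the memory listener and IOTLB message format, create
 * the shadow virtqueues, and, for the first device of the group, install a
 * migration blocker when SVQ cannot offer the required features and disable
 * RAM discard, since all guest memory ends up pinned.
 */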
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;
    vhost_vdpa_init_svq(dev, v);

    error_propagate(&dev->migration_blocker, v->migration_blocker);
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    /*
     * If dev->shadow_vqs_enabled at initialization that means the device has
     * been started with x-svq=on, so don't block migration
     */
    if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
        /* We don't have dev->features yet */
        uint64_t features;

        ret = vhost_vdpa_get_dev_features(dev, &features);
        if (unlikely(ret)) {
            error_setg_errno(errp, -ret, "Could not get device features");
            return ret;
        }
        vhost_svq_valid_features(features, &dev->migration_blocker);
    }

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot set discarding of RAM broken");
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}
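
/*
 * Host notifiers: each virtqueue doorbell can be backed by a page of the
 * vhost-vdpa device fd, mmap()ed at queue_index * page_size and exposed to
 * the guest as a ram-device memory region, so doorbell writes reach the
 * device without a VM exit to QEMU.
 */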
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}
static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    /*
     * Pack all the changes to the memory regions in a single transaction
     * to avoid repeated updates of the address space topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }

    memory_region_transaction_commit();
}
static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    /*
     * Pack all the changes to the memory regions in a single transaction
     * to avoid repeated updates of the address space topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
            break;
        }
    }

    memory_region_transaction_commit();
}
static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}
static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    if (vhost_vdpa_first_dev(dev)) {
        ram_block_discard_disable(false);
    }

    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;

    return 0;
}
static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;

        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so no need to forward this.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
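
/*
 * Negotiate backend (IOTLB) capabilities with the kernel: message format v2,
 * batched IOTLB updates, address space IDs and device suspend.
 */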
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
        0x1ULL << VHOST_BACKEND_F_SUSPEND;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}
static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    v->suspended = false;
    return ret;
}
static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}
static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                      int fd)
{
    trace_vhost_vdpa_set_config_call(dev, fd);
    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
}
static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}
static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}
static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}
static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}
static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}
static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}
static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}
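
/*
 * Shadow virtqueue (SVQ) plumbing: the helpers below create the host-side
 * kick/call eventfds, map the SVQ rings into the device IOVA space and
 * redirect the device vring addresses to the shadow rings.
 */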
/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}
/**
 * Unmap a SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->iova_tree, *result);
}
static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}
/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area to search iova
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->iova_tree, *needle);
        return false;
    }

    return true;
}
/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
        return false;
    }
    addr->used_user_addr = device_region.iova;

    return true;
}
static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}
static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);

        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}
static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

        vhost_svq_stop(svq);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}
static void vhost_vdpa_suspend(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int r;

    if (!vhost_vdpa_first_dev(dev)) {
        return;
    }

    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
        trace_vhost_vdpa_suspend(dev);
        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
        if (unlikely(r)) {
            error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
        } else {
            v->suspended = true;
            return;
        }
    }

    vhost_vdpa_reset_device(dev);
}
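
/*
 * Device start/stop: on start, install host notifiers, start the shadow
 * virtqueues and enable the vrings; on stop, suspend the device (or reset it
 * when suspend is not supported) before tearing the SVQs down. The last
 * vhost_dev of the group registers the memory listener and sets DRIVER_OK.
 */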
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;

    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_suspend(dev);
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
            error_report("SVQ cannot work while IOMMU is enabled, please "
                         "disable IOMMU and try again");
            return -1;
        }
        memory_listener_register(&v->listener, dev->vdev->dma_as);

        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    return 0;
}
static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return;
    }

    vhost_vdpa_reset_device(dev);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);
    memory_listener_unregister(&v->listener);
}
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}
static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}
static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}
static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}
static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    if (!v->suspended) {
        /*
         * Cannot trust in value returned by device, let vhost recover used
         * idx from guest.
         */
        return -1;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}
static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_kick(dev, file);
    }
}
static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;
    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

    /* Remember last call fd because we can switch to SVQ anytime. */
    vhost_svq_set_svq_call_fd(svq, file->fd);
    if (v->shadow_vqs_enabled) {
        return 0;
    }

    return vhost_vdpa_set_vring_dev_call(dev, file);
}
static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}
static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}
static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}
= {
1484 .backend_type
= VHOST_BACKEND_TYPE_VDPA
,
1485 .vhost_backend_init
= vhost_vdpa_init
,
1486 .vhost_backend_cleanup
= vhost_vdpa_cleanup
,
1487 .vhost_set_log_base
= vhost_vdpa_set_log_base
,
1488 .vhost_set_vring_addr
= vhost_vdpa_set_vring_addr
,
1489 .vhost_set_vring_num
= vhost_vdpa_set_vring_num
,
1490 .vhost_set_vring_base
= vhost_vdpa_set_vring_base
,
1491 .vhost_get_vring_base
= vhost_vdpa_get_vring_base
,
1492 .vhost_set_vring_kick
= vhost_vdpa_set_vring_kick
,
1493 .vhost_set_vring_call
= vhost_vdpa_set_vring_call
,
1494 .vhost_get_features
= vhost_vdpa_get_features
,
1495 .vhost_set_backend_cap
= vhost_vdpa_set_backend_cap
,
1496 .vhost_set_owner
= vhost_vdpa_set_owner
,
1497 .vhost_set_vring_endian
= NULL
,
1498 .vhost_backend_memslots_limit
= vhost_vdpa_memslots_limit
,
1499 .vhost_set_mem_table
= vhost_vdpa_set_mem_table
,
1500 .vhost_set_features
= vhost_vdpa_set_features
,
1501 .vhost_reset_device
= vhost_vdpa_reset_device
,
1502 .vhost_get_vq_index
= vhost_vdpa_get_vq_index
,
1503 .vhost_get_config
= vhost_vdpa_get_config
,
1504 .vhost_set_config
= vhost_vdpa_set_config
,
1505 .vhost_requires_shm_log
= NULL
,
1506 .vhost_migration_done
= NULL
,
1507 .vhost_backend_can_merge
= NULL
,
1508 .vhost_net_set_mtu
= NULL
,
1509 .vhost_set_iotlb_callback
= NULL
,
1510 .vhost_send_device_iotlb_msg
= NULL
,
1511 .vhost_dev_start
= vhost_vdpa_dev_start
,
1512 .vhost_get_device_id
= vhost_vdpa_get_device_id
,
1513 .vhost_vq_get_addr
= vhost_vdpa_vq_get_addr
,
1514 .vhost_force_iommu
= vhost_vdpa_force_iommu
,
1515 .vhost_set_config_call
= vhost_vdpa_set_config_call
,
1516 .vhost_reset_status
= vhost_vdpa_reset_status
,
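
/*
 * vdpa_ops is the VhostOps table the generic vhost core dispatches through
 * for VHOST_BACKEND_TYPE_VDPA devices; it is presumably selected when the
 * backend type is resolved during vhost_dev_init(). Callbacks left as NULL
 * fall back to the generic vhost behaviour.
 */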