/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-access.h"
/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }
    /*
     * While using vIOMMU, sometimes the section will be larger than iova_max,
     * but the memory that actually maps is smaller, so move the check to
     * vhost_vdpa_iommu_map_notify(). That function will use the actual
     * size that maps to the kernel.
     */

    if (!memory_region_is_iommu(section->mr)) {
        llend = vhost_vdpa_section_end(section);
        if (int128_gt(llend, int128_make64(iova_max))) {
            error_report("RAM section out of device range (max=0x%" PRIx64
                         ", end addr=0x%" PRIx64 ")",
                         iova_max, int128_get64(llend));
            return true;
        }
    }

    return false;
}
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
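/*
 * Tell the kernel to coalesce the following IOTLB updates into one batch:
 * a VHOST_IOTLB_BATCH_BEGIN message is written to the device fd here, and
 * the matching VHOST_IOTLB_BATCH_END is sent from the listener commit
 * callback below.
 */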
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}
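/*
 * vIOMMU notifier: translate an IOMMUTLBEntry event from the guest IOMMU
 * into a vhost-vdpa DMA map (when a permission is granted) or unmap (on an
 * invalidation), after sanity-checking the target address space and the
 * device IOVA range.
 */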
static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);

    hwaddr iova = iotlb->iova + iommu->iommu_offset;
    struct vhost_vdpa *v = iommu->dev;
    void *vaddr;
    int ret;
    Int128 llend;

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }
    RCU_READ_LOCK_GUARD();
    /* check if RAM section out of device range */
    llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
    if (int128_gt(llend, int128_make64(v->iova_range.last))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     v->iova_range.last, int128_get64(llend));
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
            return;
        }
        ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                 iotlb->addr_mask + 1, vaddr, read_only);
        if (ret) {
            error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   iotlb->addr_mask + 1);
        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, ret);
        }
    }
}
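/*
 * Register an IOMMU notifier for an IOMMU memory region section and replay
 * the existing mappings, so vhost-vdpa stays in sync with the guest IOMMU.
 */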
static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu->iommu_mr = iommu_mr;
    iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
                        IOMMU_NOTIFIER_IOTLB_EVENTS,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->dev = v;

    ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
    if (ret) {
        g_free(iommu);
        return;
    }

    QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next);
    memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
}

static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;

    QLIST_FOREACH(iommu, &v->iommu_list, iommu_next)
    {
        if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}
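/*
 * Memory listener region_add callback: map a newly added RAM section into
 * the device. With shadow virtqueue data (v->shadow_data) an IOVA is first
 * allocated from the IOVA tree; otherwise the guest physical address is
 * used as the IOVA directly.
 */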
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_add(listener, section);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
}
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_del(listener, section);
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova,
        int128_get64(int128_sub(llend, int128_one())));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    /*
     * The unmap ioctl doesn't accept a full 64-bit span, so split such a
     * request in two halves.
     */
    if (int128_eq(llsize, int128_2_64())) {
        llsize = int128_rshift(llsize, 1);
        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   int128_get64(llsize));

        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, int128_get64(llsize), ret);
        }
        iova += int128_get64(llsize);
    }
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));

    if (ret) {
        error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                     "0x%" HWADDR_PRIx ") = %d (%m)",
                     v, iova, int128_get64(llsize), ret);
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
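/*
 * Forward a vhost request to the vDPA device fd as an ioctl, converting a
 * negative return into -errno as the vhost core expects.
 */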
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}
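/*
 * Read-modify-write of the device status: OR the requested bits into the
 * current status, write it back, then read it again to verify that the
 * device actually accepted them.
 */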
static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    int ret;
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

/*
 * The use of this function is for requests that only need to be
 * applied once. Typically such a request occurs at the beginning
 * of operation, before the queues are set up. It should not be
 * used for requests that must wait until all queues are set,
 * which would need to check dev->vq_index_end instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}

static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}
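/*
 * Allocate one shadow virtqueue per vhost queue; ownership of the temporary
 * array is transferred to v->shadow_vqs at the end.
 */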
static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;
    vhost_vdpa_init_svq(dev, v);

    error_propagate(&dev->migration_blocker, v->migration_blocker);
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    /*
     * If dev->shadow_vqs_enabled is set at initialization, the device has
     * been started with x-svq=on, so don't block migration.
     */
    if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
        /* We don't have dev->features yet */
        uint64_t features;
        ret = vhost_vdpa_get_dev_features(dev, &features);
        if (unlikely(ret)) {
            error_setg_errno(errp, -ret, "Could not get device features");
            return ret;
        }
        vhost_svq_valid_features(features, &dev->migration_blocker);
    }

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}
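/*
 * mmap the per-queue notifier page exposed by the vDPA device fd and wire it
 * up as a host notifier memory region, so guest notifications are written
 * directly to the device page rather than relayed by QEMU.
 */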
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid repeated updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid repeated updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
            break;
        }
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    if (vhost_vdpa_first_dev(dev)) {
        ram_block_discard_disable(false);
    }

    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so no need to forward this.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
        0x1ULL << VHOST_BACKEND_F_SUSPEND;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    v->suspended = false;
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}
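/*
 * Enable every vring of this vhost device by issuing
 * VHOST_VDPA_SET_VRING_ENABLE for each queue index it owns.
 */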
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                      int fd)
{
    trace_vhost_vdpa_set_config_call(dev, fd);
    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}
/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}

/**
 * Unmap an SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->iova_tree, *result);
}

static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}

/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area to map; its iova field is filled in on success
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->iova_tree, *needle);
        return false;
    }

    return true;
}
/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
        return false;
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}

static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}

static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}
static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

        vhost_svq_stop(svq);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}
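/*
 * Stop device operation: use the VHOST_VDPA_SUSPEND ioctl when the backend
 * advertises VHOST_BACKEND_F_SUSPEND, and fall back to a full device reset
 * otherwise (or if the suspend call fails).
 */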
static void vhost_vdpa_suspend(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int r;

    if (!vhost_vdpa_first_dev(dev)) {
        return;
    }

    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
        trace_vhost_vdpa_suspend(dev);
        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
        if (unlikely(r)) {
            error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
        } else {
            v->suspended = true;
            return;
        }
    }

    vhost_vdpa_reset_device(dev);
}
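/*
 * Start/stop path for one vhost device. The memory listener registration and
 * the DRIVER_OK status bit are only handled by the last device of the group,
 * i.e. when dev->vq_index + dev->nvqs == dev->vq_index_end.
 */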
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;

    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_suspend(dev);
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
            error_report("SVQ can not work while IOMMU is enabled, please "
                         "disable the IOMMU and try again");
            return -1;
        }
        memory_listener_register(&v->listener, dev->vdev->dma_as);

        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    return 0;
}

static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return;
    }

    vhost_vdpa_reset_device(dev);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);
    memory_listener_unregister(&v->listener);
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}
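/*
 * With SVQ enabled the used index lives in the VirtQueue code, so report it
 * from there. Without SVQ the device value is only trustworthy after a
 * successful suspend; otherwise let vhost recover the index from the guest.
 */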
static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    if (!v->suspended) {
        /*
         * Cannot trust the value returned by the device; let vhost recover
         * the used idx from the guest.
         */
        return -1;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    }

    return vhost_vdpa_set_vring_dev_kick(dev, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;
    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

    /* Remember last call fd because we can switch to SVQ anytime. */
    vhost_svq_set_svq_call_fd(svq, file->fd);
    if (v->shadow_vqs_enabled) {
        return 0;
    }

    return vhost_vdpa_set_vring_dev_call(dev, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0 && v->shadow_vqs_enabled) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
    .vhost_set_config_call = vhost_vdpa_set_config_call,
    .vhost_reset_status = vhost_vdpa_reset_status,
};