/*
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

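/*
 * Map [iova, iova + size) to the host virtual address range starting at
 * vaddr by sending a VHOST_IOTLB_UPDATE message (vhost_msg_v2) over the
 * vhost-vdpa device fd.
 */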
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

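/*
 * Send VHOST_IOTLB_BATCH_BEGIN at most once per listener transaction,
 * and only when the backend advertises VHOST_BACKEND_F_IOTLB_BATCH.
 */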
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

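/*
 * Memory listener commit callback: close the IOTLB batch opened by
 * vhost_vdpa_iotlb_batch_begin_once() by sending VHOST_IOTLB_BATCH_END.
 */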
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

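/*
 * Memory listener region_add callback: DMA-map the RAM covered by the
 * section into the device IOVA space.
 */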
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
}

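/*
 * Memory listener region_del callback: unmap the IOVA range that was
 * mapped for this section and drop the memory region reference.
 */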
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we can not use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

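/*
 * Issue a vhost ioctl on the vhost-vdpa device fd, converting a failure
 * into a negative errno value.
 */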
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

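/*
 * Read the current device status and set the additional status bits,
 * leaving the bits that are already set untouched.
 */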
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
                              &v->iova_range);
    if (ret != 0) {
        v->iova_range.first = 0;
        v->iova_range.last = UINT64_MAX;
    }

    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
                                    v->iova_range.last);
}

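/*
 * Return true if this vhost_dev is not the first one sharing the
 * vhost-vdpa device fd; callers use this to issue device-global requests
 * only once.
 */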
static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index != 0;
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_get_iova_range(v);

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

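/*
 * Map the per-queue notify page of the vhost-vdpa device into QEMU and
 * expose it as the virtqueue host notifier memory region, so guest kicks
 * reach the device doorbell directly instead of going through QEMU.
 */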
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        return -1;
    }
    n->addr = addr;

    return 0;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }

    /*
     * The memory table is not sent to the device: mappings are installed
     * by the vhost_vdpa_memory_listener instead.
     */
    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    int ret;
    uint8_t status = 0;

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_one_time_request(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

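/*
 * Enable every virtqueue that belongs to this vhost_dev via
 * VHOST_VDPA_SET_VRING_ENABLE.
 */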
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

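/*
 * Start/stop the device. Host notifiers and vring enabling are handled
 * per vhost_dev; the memory listener and the DRIVER_OK status bit are
 * only touched once the last virtqueue of the device is reached.
 */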
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->last_index) {
        return 0;
    }

    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};