/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: vDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

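/*
 * Map/unmap helpers: vhost-vdpa reuses the vhost IOTLB message format.
 * A struct vhost_msg_v2 is written directly to the device fd and the
 * kernel side updates the device IOMMU mappings accordingly. A short
 * write is treated as a hard failure (-EIO), since a partially
 * delivered message would leave the mapping state unknown.
 */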
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

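/*
 * When the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, the listener
 * brackets each transaction of mapping updates with BATCH_BEGIN and
 * BATCH_END messages so the kernel can apply them in one go. Without
 * that capability these hooks are no-ops and every update stands alone.
 */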
static void vhost_vdpa_listener_begin(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

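/*
 * Translate a MemoryRegionSection into a single IOTLB update: clamp the
 * section to TARGET_PAGE_SIZE boundaries, derive the host virtual
 * address backing the guest range, and forward [iova, llend) to
 * vhost_vdpa_dma_map(). Int128 arithmetic is used so that a section
 * ending exactly at the top of the 64-bit space does not overflow.
 */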
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa map failed");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

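/*
 * Mirror of vhost_vdpa_listener_region_add(): recompute the same
 * page-aligned [iova, llend) range and invalidate it, then drop the
 * reference taken on the memory region when it was mapped.
 */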
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa DMA unmap failed");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental
 * updating of the mapping. So we can not use the generic vhost memory
 * listener, which depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

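/*
 * The device status byte is updated with a read-modify-write cycle so
 * that bits set by an earlier stage (e.g. ACKNOWLEDGE before DRIVER)
 * are preserved; if GET_STATUS fails the update is abandoned rather
 * than risk clobbering the current status.
 */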
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

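/*
 * Host notifiers: the vhost-vdpa fd exposes one doorbell page per
 * virtqueue via mmap(), with queue_index * page size as the offset that
 * selects the queue's page. Each mapped page is wrapped in a ram_device
 * MemoryRegion so the guest's kick writes land directly on the device
 * doorbell without trapping into QEMU.
 */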
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }

    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

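/*
 * For vDPA the memory table is not pushed to the kernel: mappings are
 * maintained incrementally by the memory listener above. This hook only
 * traces the regions and rejects a non-zero padding field.
 */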
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

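/*
 * Setting features is a two-step handshake: after VHOST_SET_FEATURES
 * succeeds, FEATURES_OK is set in the status byte and then read back to
 * verify the device accepted the negotiated feature set, mirroring the
 * virtio driver initialization sequence.
 */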
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

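/*
 * Backend capabilities (IOTLB message v2, IOTLB batching) are
 * negotiated separately from virtio features: the subset the kernel
 * offers is intersected with what QEMU supports and written back.
 * Failures are deliberately ignored so that kernels lacking
 * VHOST_GET_BACKEND_FEATURES keep working with the defaults.
 */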
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

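/*
 * Start/stop sequence. On start: register the memory listener (which
 * replays all current memory sections as region_add events), map the
 * host notifier doorbells, enable every vring, set DRIVER_OK and read
 * the status back to confirm. On stop: reset the device, restore
 * ACKNOWLEDGE|DRIVER so it can be restarted, and tear down the
 * notifiers and mappings.
 */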
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;

    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

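/*
 * Entry points wired into the generic vhost layer. Hooks left NULL
 * (e.g. vhost_set_iotlb_callback) are features this backend either
 * does not need or handles internally via the memory listener.
 */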
const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};